Merge remote-tracking branch 'prometheus/main' into arve/wlog-histograms

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
This commit is contained in:
Arve Knudsen 2024-07-26 11:51:29 +02:00
commit 9af19ed856
159 changed files with 12610 additions and 3529 deletions

View file

@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

View file

@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

View file

@ -143,6 +143,18 @@ jobs:
with:
parallelism: 12
thread: ${{ matrix.thread }}
build_all_status:
name: Report status of build Prometheus for all architectures
runs-on: ubuntu-latest
needs: [build_all]
if: github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-')
steps:
- name: Successful build
if: ${{ !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled')) }}
run: exit 0
- name: Failing or cancelled build
if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}
run: exit 1
check_generated_parser:
name: Check generated parser
runs-on: ubuntu-latest

View file

@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11

View file

@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
with:
sarif_file: results.sarif

View file

@ -1,12 +1,5 @@
run:
timeout: 15m
skip-files:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
skip-dirs:
# Copied it from a different source
- storage/remote/otlptranslator/prometheusremotewrite
- storage/remote/otlptranslator/prometheus
output:
sort-results: true
@ -33,6 +26,13 @@ linters:
issues:
max-same-issues: 0
exclude-files:
# Skip autogenerated files.
- ^.*\.(pb|y)\.go$
exclude-dirs:
# Copied it from a different source
- storage/remote/otlptranslator/prometheusremotewrite
- storage/remote/otlptranslator/prometheus
exclude-rules:
- linters:
- gocritic

View file

@ -2,8 +2,19 @@
## unreleased
* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444
* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will now have correct values for `prometheus_storage_<samples|histograms|exemplar>_failed_total` in case of partial errors #14444
* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
## 2.53.1 / 2024-07-10
Fix a bug which would drop samples in remote-write if the sending flow stalled
for longer than it takes to write one "WAL segment". How long this takes depends on the size
of your Prometheus; as a rough guide with 10 million series it is about 2-3 minutes.
* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
## 2.53.0 / 2024-06-16
This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that adjusting the upstream Go default from 100 to 75 greatly reduces Prometheus's memory use with minimal additional CPU usage.

View file

@ -57,7 +57,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
If you are interested in volunteering, please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

View file

@ -1 +1 @@
2.53.0
2.53.1

View file

@ -194,6 +194,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "extra-scrape-metrics":
c.scrape.ExtraMetrics = true
level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
case "metadata-wal-records":
c.scrape.AppendMetadata = true
level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
case "new-service-discovery-manager":
c.enableNewSDManager = true
level.Info(logger).Log("msg", "Experimental service discovery manager")
@ -322,9 +325,15 @@ func main() {
a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
Default("false").BoolVar(&cfg.web.EnableAdminAPI)
// TODO(bwplotka): Consider allowing those remote receive flags to be changed in config.
// See https://github.com/prometheus/prometheus/issues/14410
a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)
supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
@ -646,7 +655,7 @@ func main() {
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
@ -1767,3 +1776,39 @@ type discoveryManager interface {
Run() error
SyncCh() <-chan map[string][]*targetgroup.Group
}
// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
type rwProtoMsgFlagParser struct {
msgs *[]config.RemoteWriteProtoMsg
}
func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value {
return &rwProtoMsgFlagParser{msgs: msgs}
}
// IsCumulative is used by kingpin to tell if it's an array or not.
func (p *rwProtoMsgFlagParser) IsCumulative() bool {
return true
}
func (p *rwProtoMsgFlagParser) String() string {
ss := make([]string, 0, len(*p.msgs))
for _, t := range *p.msgs {
ss = append(ss, string(t))
}
return strings.Join(ss, ",")
}
func (p *rwProtoMsgFlagParser) Set(opt string) error {
t := config.RemoteWriteProtoMsg(opt)
if err := t.Validate(); err != nil {
return err
}
for _, prev := range *p.msgs {
if prev == t {
return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
}
}
*p.msgs = append(*p.msgs, t)
return nil
}
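For illustration, here is a minimal sketch of how this parser is wired into a kingpin flag, mirroring the `--web.remote-write-receiver.accepted-protobuf-messages` registration above (the app and flag names are illustrative, and the sketch assumes it lives in the same package as `rwProtoMsgFlagParser`):

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"

	"github.com/prometheus/prometheus/config"
)

func exampleFlagWiring(args []string) {
	app := kingpin.New("example", "rwProtoMsgFlagParser wiring sketch")

	var msgs []config.RemoteWriteProtoMsg
	supported := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
	// Set is called once per occurrence of the flag; it validates each value
	// and rejects duplicates.
	app.Flag("accepted-protobuf-messages",
		fmt.Sprintf("Supported values: %v", supported.String())).
		Default(supported.Strings()...).
		SetValue(rwProtoMsgFlagValue(&msgs))

	if _, err := app.Parse(args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(2)
	}
	fmt.Println(msgs)
}
```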

View file

@ -30,11 +30,13 @@ import (
"testing"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/rules"
@ -499,3 +501,65 @@ func TestDocumentation(t *testing.T) {
require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
func TestRwProtoMsgFlagParser(t *testing.T) {
defaultOpts := config.RemoteWriteProtoMsgs{
config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
}
for _, tcase := range []struct {
args []string
expected []config.RemoteWriteProtoMsg
expectedErr error
}{
{
args: nil,
expected: defaultOpts,
},
{
args: []string{"--test-proto-msgs", "test"},
expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"),
},
{
args: []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"},
expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2},
},
{
args: []string{
"--test-proto-msgs", "io.prometheus.write.v2.Request",
"--test-proto-msgs", "io.prometheus.write.v2.Request",
},
expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"),
},
{
args: []string{
"--test-proto-msgs", "io.prometheus.write.v2.Request",
"--test-proto-msgs", "prometheus.WriteRequest",
},
expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1},
},
{
args: []string{
"--test-proto-msgs", "io.prometheus.write.v2.Request",
"--test-proto-msgs", "prometheus.WriteRequest",
"--test-proto-msgs", "io.prometheus.write.v2.Request",
},
expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"),
},
} {
t.Run(strings.Join(tcase.args, ","), func(t *testing.T) {
a := kingpin.New("test", "")
var opt []config.RemoteWriteProtoMsg
a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt))
_, err := a.Parse(tcase.args)
if tcase.expectedErr != nil {
require.Error(t, err)
require.Equal(t, tcase.expectedErr, err)
} else {
require.NoError(t, err)
require.Equal(t, tcase.expected, opt)
}
})
}
}

View file

@ -101,6 +101,7 @@ func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[strin
return successExitCode
}
// TODO(bwplotka): Add PRW 2.0 support.
func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]string) bool {
metricsData, err := fmtutil.MetricTextToWriteRequest(bytes.NewReader(data), labels)
if err != nil {
@ -116,7 +117,7 @@ func parseAndPushMetrics(client *remote.Client, data []byte, labels map[string]s
// Encode the request body into snappy encoding.
compressed := snappy.Encode(nil, raw)
err = client.Store(context.Background(), compressed, 0)
_, err = client.Store(context.Background(), compressed, 0)
if err != nil {
fmt.Fprintln(os.Stderr, " FAILED:", err)
return false

View file

@ -180,6 +180,7 @@ var (
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second),
ProtobufMessage: RemoteWriteProtoMsgV1,
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.DefaultHTTPClientConfig,
@ -226,6 +227,9 @@ var (
DefaultExemplarsConfig = ExemplarsConfig{
MaxExemplars: 100000,
}
// DefaultOTLPConfig is the default OTLP configuration.
DefaultOTLPConfig = OTLPConfig{}
)
// Config is the top-level configuration for Prometheus's config files.
@ -241,6 +245,7 @@ type Config struct {
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
OTLPConfig OTLPConfig `yaml:"otlp,omitempty"`
}
// SetDirectory joins any relative file paths with dir.
@ -279,7 +284,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
// We do these checks for library users that would not call Validate in
// We do these checks for library users that would not call validate in
// Unmarshal.
if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, err
@ -1055,6 +1060,49 @@ func CheckTargetAddress(address model.LabelValue) error {
return nil
}
// RemoteWriteProtoMsg represents the known protobuf message for the remote write
// 1.0 and 2.0 specs.
type RemoteWriteProtoMsg string
// Validate returns an error if the given reference for the protobuf message is not supported.
func (s RemoteWriteProtoMsg) Validate() error {
switch s {
case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
return nil
default:
return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
}
}
type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
func (m RemoteWriteProtoMsgs) Strings() []string {
ret := make([]string, 0, len(m))
for _, typ := range m {
ret = append(ret, string(typ))
}
return ret
}
func (m RemoteWriteProtoMsgs) String() string {
return strings.Join(m.Strings(), ", ")
}
var (
// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
// message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
//
// NOTE: This string is used for both HTTP header values and config value, so don't change
// this reference.
RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
// RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
// message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
//
// NOTE: This string is used for both HTTP header values and config value, so don't change
// this reference.
RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
)
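A small, hypothetical sketch of how `Validate` rejects unknown message names (the `"foo"` input is deliberately invalid; the two official names pass):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	for _, in := range []string{
		"prometheus.WriteRequest",
		"io.prometheus.write.v2.Request",
		"foo", // hypothetical bad input
	} {
		if err := config.RemoteWriteProtoMsg(in).Validate(); err != nil {
			fmt.Println("rejected:", err)
			continue
		}
		fmt.Println("accepted:", in)
	}
}
```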
// RemoteWriteConfig is the configuration for writing to remote storage.
type RemoteWriteConfig struct {
URL *config.URL `yaml:"url"`
@ -1064,6 +1112,9 @@ type RemoteWriteConfig struct {
Name string `yaml:"name,omitempty"`
SendExemplars bool `yaml:"send_exemplars,omitempty"`
SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"`
// ProtobufMessage specifies the protobuf message to use against the remote
// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@ -1098,6 +1149,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return err
}
if err := c.ProtobufMessage.Validate(); err != nil {
return fmt.Errorf("invalid protobuf_message value: %w", err)
}
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
@ -1253,3 +1308,35 @@ func getGoGCEnv() int {
}
return DefaultRuntimeConfig.GoGC
}
// OTLPConfig is the configuration for writing to the OTLP endpoint.
type OTLPConfig struct {
PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultOTLPConfig
type plain OTLPConfig
if err := unmarshal((*plain)(c)); err != nil {
return err
}
seen := map[string]struct{}{}
var err error
for i, attr := range c.PromoteResourceAttributes {
attr = strings.TrimSpace(attr)
if attr == "" {
err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
continue
}
if _, exists := seen[attr]; exists {
err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr))
continue
}
seen[attr] = struct{}{}
c.PromoteResourceAttributes[i] = attr
}
return err
}
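As a sketch of the behaviour above (assuming `gopkg.in/yaml.v2`, which Prometheus uses, so that `UnmarshalYAML` is invoked): trimming turns `" k8s.job.name "` into a duplicate of `"k8s.job.name"`, and the empty string is rejected, with both errors joined:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/config"
)

func main() {
	raw := []byte(`promote_resource_attributes: ["k8s.job.name", " k8s.job.name ", ""]`)

	var cfg config.OTLPConfig
	// UnmarshalYAML trims each attribute, then reports duplicates and
	// empty values as one joined error.
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		fmt.Println(err)
	}
}
```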

View file

@ -108,9 +108,10 @@ var expectedConf = &Config{
RemoteWriteConfigs: []*RemoteWriteConfig{
{
URL: mustParseURL("http://remote1/push"),
RemoteTimeout: model.Duration(30 * time.Second),
Name: "drop_expensive",
URL: mustParseURL("http://remote1/push"),
ProtobufMessage: RemoteWriteProtoMsgV1,
RemoteTimeout: model.Duration(30 * time.Second),
Name: "drop_expensive",
WriteRelabelConfigs: []*relabel.Config{
{
SourceLabels: model.LabelNames{"__name__"},
@ -137,11 +138,12 @@ var expectedConf = &Config{
},
},
{
URL: mustParseURL("http://remote2/push"),
RemoteTimeout: model.Duration(30 * time.Second),
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
Name: "rw_tls",
URL: mustParseURL("http://remote2/push"),
ProtobufMessage: RemoteWriteProtoMsgV2,
RemoteTimeout: model.Duration(30 * time.Second),
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
Name: "rw_tls",
HTTPClientConfig: config.HTTPClientConfig{
TLSConfig: config.TLSConfig{
CertFile: filepath.FromSlash("testdata/valid_cert_file"),
@ -154,6 +156,12 @@ var expectedConf = &Config{
},
},
OTLPConfig: OTLPConfig{
PromoteResourceAttributes: []string{
"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
},
},
RemoteReadConfigs: []*RemoteReadConfig{
{
URL: mustParseURL("http://remote1/read"),
@ -1469,6 +1477,26 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
}
func TestOTLPSanitizeResourceAttributes(t *testing.T) {
t.Run("good config", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
var got Config
require.NoError(t, yaml.UnmarshalStrict(out, &got))
require.Equal(t, []string{"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"}, got.OTLPConfig.PromoteResourceAttributes)
})
t.Run("bad config", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
})
}
func TestLoadConfig(t *testing.T) {
// Parse a valid file that sets a global scrape timeout. This tests whether parsing
// an overwritten default field in the global config permanently changes the default.
@ -1800,6 +1828,10 @@ var expectedErrors = []struct {
filename: "remote_write_authorization_header.bad.yml",
errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
},
{
filename: "remote_write_wrong_msg.bad.yml",
errMsg: `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`,
},
{
filename: "remote_write_url_missing.bad.yml",
errMsg: `url for remote_write is empty`,

View file

@ -37,6 +37,7 @@ remote_write:
key_file: valid_key_file
- url: http://remote2/push
protobuf_message: io.prometheus.write.v2.Request
name: rw_tls
tls_config:
cert_file: valid_cert_file
@ -44,6 +45,9 @@ remote_write:
headers:
name: value
otlp:
promote_resource_attributes: ["k8s.cluster.name", "k8s.job.name", "k8s.namespace.name"]
remote_read:
- url: http://remote1/read
read_recent: true

View file

@ -0,0 +1,2 @@
otlp:
promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name", "k8s.job.name", ""]

View file

@ -0,0 +1,2 @@
otlp:
promote_resource_attributes: ["k8s.cluster.name", " k8s.job.name ", "k8s.namespace.name"]

View file

@ -0,0 +1,3 @@
remote_write:
- url: localhost:9090
protobuf_message: io.prometheus.writet.v2.Request # typo in 'write'

View file

@ -17,7 +17,7 @@ import (
"context"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/prometheus/prometheus/util/strutil"
@ -34,7 +34,7 @@ const (
)
func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix string) (map[string]map[string]string, error) {
networks, err := client.NetworkList(ctx, types.NetworkListOptions{})
networks, err := client.NetworkList(ctx, network.ListOptions{})
if err != nil {
return nil, err
}
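The corresponding standalone call looks roughly like this (a minimal sketch, assuming a Docker daemon reachable via the environment and the v26+ `github.com/docker/docker` module, where `network.ListOptions` replaces the removed `types.NetworkListOptions`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// network.ListOptions replaces the deprecated types.NetworkListOptions.
	networks, err := cli.NetworkList(context.Background(), network.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, nw := range networks {
		fmt.Println(nw.ID, nw.Name)
	}
}
```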

View file

@ -26,10 +26,11 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.enable-lifecycle</code> | Enable shutdown and reload via HTTP request. | `false` |
| <code class="text-nowrap">--web.enable-admin-api</code> | Enable API endpoints for admin control actions. | `false` |
| <code class="text-nowrap">--web.enable-remote-write-receiver</code> | Enable API endpoint accepting remote write requests. | `false` |
| <code class="text-nowrap">--web.remote-write-receiver.accepted-protobuf-messages</code> | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` |
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` |
| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |

View file

@ -152,6 +152,10 @@ alerting:
remote_write:
[ - <remote_write> ... ]
# Settings related to the OTLP receiver feature.
otlp:
[ promote_resource_attributes: [<string>, ...] | default = [ ] ]
# Settings related to the remote read feature.
remote_read:
[ - <remote_read> ... ]
@ -458,13 +462,15 @@ metric_relabel_configs:
[ keep_dropped_targets: <int> | default = 0 ]
# Limit on total number of positive and negative buckets allowed in a single
# native histogram. If this is exceeded, the entire scrape will be treated as
# failed. 0 means no limit.
# native histogram. The resolution of a histogram with more buckets will be
# reduced until the number of buckets is within the limit. If the limit cannot
# be reached, the scrape will fail.
# 0 means no limit.
[ native_histogram_bucket_limit: <int> | default = 0 ]
# Lower limit for the growth factor of one bucket to the next in each native
# histogram. The resolution of a histogram with a lower growth factor will be
# reduced until it is within the limit.
# reduced as much as possible until it is within the limit.
# To set an upper limit for the schema (equivalent to "scale" in OTel's
# exponential histograms), use the following factor limits:
#
@ -3575,6 +3581,17 @@ this functionality.
# The URL of the endpoint to send samples to.
url: <string>
# protobuf message to use when writing to the remote write endpoint.
#
# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which
# will be deprecated eventually.
# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former,
# improving efficiency and sending metadata, created timestamps, and native histograms by default.
#
# Before changing this value, consult your remote storage provider (or test) to confirm which message it supports.
# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request
[ protobuf_message: <prometheus.WriteRequest | io.prometheus.write.v2.Request> | default = prometheus.WriteRequest ]
# Timeout for requests to the remote write endpoint.
[ remote_timeout: <duration> | default = 30s ]
@ -3596,6 +3613,7 @@ write_relabel_configs:
[ send_exemplars: <boolean> | default = false ]
# Enables sending of native histograms, also known as sparse histograms, over remote write.
# For the `io.prometheus.write.v2.Request` message, this option is a no-op (always true).
[ send_native_histograms: <boolean> | default = false ]
# Sets the `Authorization` header on every remote write request with the
@ -3609,7 +3627,7 @@ basic_auth:
# Optional `Authorization` header configuration.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
[ type: <string> | default = Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
@ -3673,7 +3691,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPS_PROXY, and no_proxy)
[ proxy_from_environment: <boolean> | default: false ]
[ proxy_from_environment: <boolean> | default = false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -3682,7 +3700,7 @@ tls_config:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
[ enable_http2: <boolean> | default: true ]
[ enable_http2: <boolean> | default = true ]
# Configures the queue used to write to remote storage.
queue_config:
@ -3712,7 +3730,10 @@ queue_config:
# which means that all samples are sent.
[ sample_age_limit: <duration> | default = 0s ]
# Configures the sending of series metadata to remote storage.
# Configures the sending of series metadata to remote storage
# if the `prometheus.WriteRequest` message was chosen. When
# `io.prometheus.write.v2.Request` is used, metadata is always sent.
#
# Metadata configuration is subject to change at any point
# or be removed in future releases.
metadata_config:

View file

@ -224,3 +224,13 @@ When the `concurrent-rule-eval` feature flag is enabled, rules without any depen
This has the potential to improve rule group evaluation latency and resource utilization at the expense of adding more concurrent query load.
The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default.
## Metadata WAL Records
`--enable-feature=metadata-wal-records`
When enabled, Prometheus will store metadata in-memory and keep track of
metadata changes as WAL records on a per-series basis.
This must be used if you are also using remote write 2.0, as it only gathers metadata from the WAL.

View file

@ -25,8 +25,10 @@ Other non-`2xx` codes may be returned for errors occurring before the API
endpoint is reached.
An array of warnings may be returned if there are errors that do
not inhibit the request execution. All of the data that was successfully
collected will be returned in the data field.
not inhibit the request execution. An additional array of info-level
annotations may be returned for potential query issues that may or may
not be false positives. All of the data that was successfully collected
will be returned in the data field.
The JSON response envelope format is as follows:
@ -40,9 +42,11 @@ The JSON response envelope format is as follows:
"errorType": "<string>",
"error": "<string>",
// Only if there were warnings while executing the request.
// Only set if there were warnings while executing the request.
// There will still be data in the data field.
"warnings": ["<string>"]
"warnings": ["<string>"],
// Only set if there were info-level annotations while executing the request.
"infos": ["<string>"]
}
```
@ -256,7 +260,7 @@ URL query parameters:
series to return. At least one `match[]` argument must be provided.
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
- `limit=<number>`: Maximum number of returned series. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
@ -307,7 +311,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
series from which to read the label names. Optional.
- `limit=<number>`: Maximum number of returned series. Optional.
- `limit=<number>`: Maximum number of returned label names. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label names.
@ -358,7 +362,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
series from which to read the label values. Optional.
- `limit=<number>`: Maximum number of returned series. Optional.
- `limit=<number>`: Maximum number of returned label values. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label values.
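For example, a sketch of the `limit` parameter in use (assuming a Prometheus server on `localhost:9090` and an existing `job` label):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// At most 10 label values are returned; limit=0 (the default) disables the cap.
	resp, err := http.Get("http://localhost:9090/api/v1/label/job/values?limit=10")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```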
@ -452,7 +456,7 @@ raw numbers.
The keys `"histogram"` and `"histograms"` only show up if the experimental
native histograms are present in the response. Their placeholder `<histogram>`
is explained in detail in its own section below.
### Range vectors
@ -470,7 +474,7 @@ Range vectors are returned as result type `matrix`. The corresponding
]
```
Each series could have the `"values"` key, or the `"histograms"` key, or both.
Each series could have the `"values"` key, or the `"histograms"` key, or both.
For a given timestamp, there will only be one sample of either float or histogram type.
Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort)
@ -689,7 +693,8 @@ URL query parameters:
- `rule_name[]=<string>`: only return rules with the given rule name. If the parameter is repeated, rules with any of the provided names are returned. If we've filtered out all the rules of a group, the group is not returned. When the parameter is absent or empty, no filtering is done.
- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
```json
$ curl http://localhost:9090/api/v1/rules
@ -1309,7 +1314,7 @@ endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview).
## OTLP Receiver
Prometheus can be configured as a receiver for the OTLP Metrics protocol. This
is not considered an efficient way of ingesting samples. Use it
with caution for specific low-volume use cases. It is not suitable for
replacing the ingestion via scraping.

View file

@ -8,9 +8,15 @@ sort_rank: 1
Prometheus provides a functional query language called PromQL (Prometheus Query
Language) that lets the user select and aggregate time series data in real
time. The result of an expression can either be shown as a graph, viewed as
tabular data in Prometheus's expression browser, or consumed by external
systems via the [HTTP API](api.md).
time.
When you send a query request to Prometheus, it can be an _instant query_, evaluated at one point in time,
or a _range query_, evaluated at equally-spaced steps between a start and an end time. PromQL works exactly the same
in each case; the range query is just like an instant query run multiple times at different timestamps.
In the Prometheus UI, the "Table" tab is for instant queries and the "Graph" tab is for range queries.
Other programs can fetch the result of a PromQL expression via the [HTTP API](api.md).
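To make the distinction concrete, here is a minimal sketch of both query kinds against the HTTP API (assuming a Prometheus server on `localhost:9090`; the expression and timestamps are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func fetch(u string) {
	resp, err := http.Get(u)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}

func main() {
	base := "http://localhost:9090/api/v1"
	expr := url.QueryEscape(`up`)

	// Instant query: one evaluation timestamp (defaults to the current time).
	fetch(base + "/query?query=" + expr)

	// Range query: the same expression evaluated every 15s over five minutes.
	fetch(base + "/query_range?query=" + expr +
		"&start=2024-07-26T10:00:00Z&end=2024-07-26T10:05:00Z&step=15s")
}
```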
## Examples
@ -81,12 +87,20 @@ Examples:
0x8f
-Inf
NaN
As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change.
Examples:
1s # Equivalent to 1.0
2m # Equivalent to 120.0
1ms # Equivalent to 0.001
## Time series selectors
Time series selectors are responsible for selecting the time series and the raw or inferred sample timestamps and values.
Time series *selectors* are not to be confused with the higher-level concept of instant and range *queries* that execute them. An instant query evaluates the given selector at one point in time, whereas a range query evaluates the selector at regular steps between a minimum and maximum timestamp.
These are the basic building-blocks that instruct PromQL what data to fetch.
### Instant vector selectors
@ -224,6 +238,15 @@ Here are some examples of valid time durations:
5m
10s
As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change.
Examples:
1.0 # Equivalent to 1s
0.001 # Equivalent to 1ms
120 # Equivalent to 2m
### Offset modifier
The `offset` modifier allows changing the time offset for individual

View file

@ -95,3 +95,13 @@ Assuming this metric contains one time series per running instance, you could
count the number of running instances per application like this:
count by (app) (instance_cpu_time_ns)
When exploring metrics for their labels, e.g. to decide which ones to aggregate
over, we could use the following:
limitk(10, app_foo_metric_bar)
Alternatively, if we wanted the returned timeseries to be more evenly sampled,
we could use the following to get approximately 10% of them:
limit_ratio(0.1, app_foo_metric_bar)

View file

@ -98,8 +98,9 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.
Special cases:
- Return an empty vector if `min > max`
- Return `NaN` if `min` or `max` is `NaN`
* Return an empty vector if `min > max`
* Return `NaN` if `min` or `max` is `NaN`
## `clamp_max()`
@ -349,8 +350,8 @@ a histogram.
Buckets of classic histograms are cumulative. Therefore, the following should always be the case:
- The counts in the buckets are monotonically increasing (strictly non-decreasing).
- A lack of observations between the upper limits of two consecutive buckets results in equal counts
* The counts in the buckets are monotonically increasing (strictly non-decreasing).
* A lack of observations between the upper limits of two consecutive buckets results in equal counts
in those two buckets.
However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets
@ -692,21 +693,21 @@ ignore histogram samples.
The trigonometric functions work in radians:
- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
The following are useful for converting between degrees and radians:
- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
- `pi()`: returns pi.
- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
* `pi()`: returns pi.
* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.

View file

@ -230,6 +230,8 @@ vector of fewer elements with aggregated values:
* `bottomk` (smallest k elements by sample value)
* `topk` (largest k elements by sample value)
* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
* `limitk` (sample n elements)
* `limit_ratio` (deterministically sample approximately a ratio `r` of the elements if `r > 0`; a negative parameter selects the complement instead, so `limit_ratio(-(1.0 - r), ...)` returns exactly the elements not picked by `limit_ratio(r, ...)`)
These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@ -249,8 +251,8 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.
`parameter` is only required for `count_values`, `quantile`, `topk` and
`bottomk`.
`parameter` is only required for `count_values`, `quantile`, `topk`,
`bottomk`, `limitk` and `limit_ratio`.
`count_values` outputs one time series per unique sample value. Each series has
an additional label. The name of that label is given by the aggregation
@ -261,11 +263,16 @@ time series is the number of times that sample value was present.
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector.
`limitk` and `limit_ratio` also return a subset of the input samples,
including the original labels in the result vector. These are experimental
operators that must be enabled with `--enable-feature=promql-experimental-functions`.
`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
Example:
If the metric `http_requests_total` had time series that fan out by
@ -291,6 +298,33 @@ To get the 5 largest HTTP requests counts across all instances we could write:
topk(5, http_requests_total)
To sample 10 timeseries, for example to inspect labels and their values, we
could write:
limitk(10, http_requests_total)
To deterministically sample approximately 10% of timeseries we could write:
limit_ratio(0.1, http_requests_total)
Given that `limit_ratio()` implements a deterministic sampling algorithm (based
on labels' hash), you can get the _complement_ of the above samples, i.e.
approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)`
with:
limit_ratio(-0.9, http_requests_total)
You can also use this feature to e.g. verify that `avg()` is a representative
aggregation for your samples' values, by checking that the difference between
the averages of two sampled subsets is "small" compared to the standard
deviation.
abs(
avg(limit_ratio(0.5, http_requests_total))
-
avg(limit_ratio(-0.5, http_requests_total))
) <= bool stddev(http_requests_total)
## Binary operator precedence
The following list shows the precedence of binary operators in Prometheus, from

View file

@ -137,6 +137,18 @@ will be used.
Expired block cleanup happens in the background. It may take up to two hours
to remove expired blocks. Blocks must be fully expired before they are removed.
## Right-Sizing Retention Size
If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
will want to consider the right size for this value relative to the storage you
have allocated for Prometheus. It is wise to reduce the retention size to provide
a buffer, ensuring that older entries will be removed before the allocated storage
for Prometheus becomes full.
At present, we recommend setting the retention size to, at most, 80-85% of your
allocated Prometheus disk space. This increases the likelihood that older entries
will be removed prior to hitting any disk limitations.
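As a hypothetical example, on a 100GB disk dedicated to Prometheus this guidance works out to `--storage.tsdb.retention.size=80GB` (or up to `85GB`), leaving a 15-20% buffer before the disk fills.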
## Remote storage integrations
Prometheus's local storage is limited to a single node's scalability and durability.

View file

@ -7,6 +7,7 @@ To use it:
```
go build
./example_write_adapter
```
@ -15,10 +16,19 @@ go build
```yaml
remote_write:
- url: "http://localhost:1234/receive"
protobuf_message: "io.prometheus.write.v2.Request"
```
Then start Prometheus:
Or, for the deprecated Remote Write 1.0 message:
```yaml
remote_write:
- url: "http://localhost:1234/receive"
protobuf_message: "prometheus.WriteRequest"
```
Then start Prometheus (in a separate terminal):
```
./prometheus
./prometheus --enable-feature=metadata-wal-records
```
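The `metadata-wal-records` feature flag is required here because the remote write 2.0 sender only gathers metadata from the WAL (see the Metadata WAL Records feature flag documentation above).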

View file

@ -18,44 +18,103 @@ import (
"log"
"net/http"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage/remote"
)
func main() {
http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
enc := r.Header.Get("Content-Encoding")
if enc == "" {
http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
return
}
if enc != "snappy" {
http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
return
}
for _, ts := range req.Timeseries {
m := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
fmt.Println(m)
contentType := r.Header.Get("Content-Type")
if contentType == "" {
http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
// Bail out when the header is absent instead of falling through to the switch.
return
}
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
defer func() { _ = r.Body.Close() }()
for _, e := range ts.Exemplars {
m := make(model.Metric, len(e.Labels))
for _, l := range e.Labels {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
}
fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
// Very simplistic content parsing, see
// storage/remote/write_handler.go#WriteHandler.ServeHTTP for a production example.
switch contentType {
case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest":
req, err := remote.DecodeWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, hp := range ts.Histograms {
h := remote.HistogramProtoToHistogram(hp)
fmt.Printf("\tHistogram: %s\n", h.String())
printV1(req)
case "application/x-protobuf;proto=io.prometheus.write.v2.Request":
req, err := remote.DecodeWriteV2Request(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
printV2(req)
default:
msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
fmt.Println(msg)
http.Error(w, msg, http.StatusBadRequest)
}
})
log.Fatal(http.ListenAndServe(":1234", nil))
}
func printV1(req *prompb.WriteRequest) {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
fmt.Println(ts.ToLabels(&b, nil))
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil)
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
continue
}
h := hp.ToIntHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
}
func printV2(req *writev2.Request) {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
l := ts.ToLabels(&b, req.Symbols)
m := ts.ToMetadata(req.Symbols)
fmt.Println(l, m)
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, req.Symbols)
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
continue
}
h := hp.ToIntHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
}
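To exercise the adapter without a running Prometheus, here is a minimal hand-rolled sender sketch for the 1.0 message (the metric name, value, and port are illustrative; it sets the same headers the handler above dispatches on):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// One illustrative series with a single float sample.
	req := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels: []prompb.Label{
				{Name: "__name__", Value: "example_metric"},
				{Name: "job", Value: "example"},
			},
			Samples: []prompb.Sample{
				{Value: 42, Timestamp: time.Now().UnixMilli()},
			},
		}},
	}

	raw, err := req.Marshal() // prompb types are gogo-generated and self-marshal
	if err != nil {
		panic(err)
	}
	body := snappy.Encode(nil, raw)

	hreq, err := http.NewRequest(http.MethodPost, "http://localhost:1234/receive", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// The same headers the handler above dispatches on.
	hreq.Header.Set("Content-Encoding", "snappy")
	hreq.Header.Set("Content-Type", "application/x-protobuf;proto=prometheus.WriteRequest")
	hreq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")

	resp, err := http.DefaultClient.Do(hreq)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	msg, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(msg))
}
```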

View file

@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
go 1.21
go 1.21.0
require (
github.com/alecthomas/kingpin/v2 v2.4.0
@ -9,18 +9,18 @@ require (
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/common v0.54.0
github.com/prometheus/prometheus v0.52.1
github.com/prometheus/common v0.55.0
github.com/prometheus/prometheus v0.53.1
github.com/stretchr/testify v1.9.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.51.25 // indirect
github.com/aws/aws-sdk-go v1.53.16 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -31,8 +31,7 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@ -41,31 +40,31 @@ require (
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/collector/featuregate v1.5.0 // indirect
go.opentelemetry.io/collector/pdata v1.5.0 // indirect
go.opentelemetry.io/collector/semconv v0.98.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
go.opentelemetry.io/otel v1.25.0 // indirect
go.opentelemetry.io/otel/metric v1.25.0 // indirect
go.opentelemetry.io/otel/trace v1.25.0 // indirect
go.opentelemetry.io/collector/pdata v1.8.0 // indirect
go.opentelemetry.io/collector/semconv v0.101.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
go.opentelemetry.io/otel v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/oauth2 v0.19.0 // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.34.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.29.3 // indirect
@ -81,4 +80,10 @@ exclude (
cloud.google.com/go v0.34.0
cloud.google.com/go v0.65.0
cloud.google.com/go v0.82.0
// Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules.
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
)
// TODO(bwplotka): Move to main branch commit or perhaps released version.
replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c

View file

@ -2,10 +2,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -37,8 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -68,8 +68,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -95,8 +95,8 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
@@ -135,40 +135,38 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
@@ -208,14 +206,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
@@ -243,8 +241,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -269,22 +267,22 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -306,20 +304,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -330,14 +326,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -351,12 +347,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -380,38 +376,37 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -419,8 +414,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

go.mod

@@ -1,22 +1,24 @@
module github.com/prometheus/prometheus
go 1.21
go 1.21.0
toolchain go1.22.5
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.5.1
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
github.com/aws/aws-sdk-go v1.53.16
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
github.com/aws/aws-sdk-go v1.54.19
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.117.0
github.com/docker/docker v26.1.3+incompatible
github.com/digitalocean/godo v1.118.0
github.com/docker/docker v27.0.3+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0
github.com/envoyproxy/protoc-gen-validate v1.0.4
@@ -29,75 +31,75 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.6.0
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud v1.12.0
github.com/gophercloud/gophercloud v1.13.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.29.1
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
github.com/hetznercloud/hcloud-go/v2 v2.9.0
github.com/hashicorp/consul/api v1.29.2
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.10.2
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.8
github.com/klauspost/compress v1.17.9
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.35.0
github.com/miekg/dns v1.1.59
github.com/linode/linodego v1.37.0
github.com/miekg/dns v1.1.61
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.1.0
github.com/oklog/ulid v1.3.1
github.com/ovh/go-ovh v1.5.1
github.com/ovh/go-ovh v1.6.0
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.54.0
github.com/prometheus/common v0.55.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.11.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v1.8.0
go.opentelemetry.io/collector/semconv v0.101.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
go.opentelemetry.io/otel/sdk v1.27.0
go.opentelemetry.io/otel/trace v1.27.0
go.opentelemetry.io/collector/pdata v1.12.0
go.opentelemetry.io/collector/semconv v0.105.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
go.opentelemetry.io/otel/sdk v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
golang.org/x/net v0.26.0
golang.org/x/net v0.27.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.21.0
golang.org/x/sys v0.22.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.22.0
google.golang.org/api v0.183.0
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
google.golang.org/grpc v1.64.0
google.golang.org/protobuf v1.34.1
golang.org/x/tools v0.23.0
google.golang.org/api v0.188.0
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3
k8s.io/apimachinery v0.29.3
k8s.io/client-go v0.29.3
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.120.1
k8s.io/klog/v2 v2.130.1
)
require (
cloud.google.com/go/auth v0.5.1 // indirect
cloud.google.com/go/auth v0.7.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
cloud.google.com/go/compute/metadata v0.4.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
@@ -105,7 +107,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cilium/ebpf v0.11.0 // indirect
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -119,7 +121,7 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.22.2 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
@@ -132,7 +134,7 @@ require (
github.com/go-resty/resty/v2 v2.13.1 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/glog v1.2.0 // indirect
github.com/golang/glog v1.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
@@ -140,7 +142,7 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.4 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
@@ -176,20 +178,20 @@ require (
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/term v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
golang.org/x/mod v0.19.0 // indirect
golang.org/x/term v0.22.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect

go.sum

@@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -36,12 +36,12 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
@@ -75,8 +75,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI=
github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@@ -143,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -210,8 +210,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
@@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -353,10 +353,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -409,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -454,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -500,8 +500,8 @@ github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwU
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -573,8 +573,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@ -639,8 +639,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@ -650,8 +650,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@ -694,6 +694,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -723,28 +724,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -773,8 +774,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -809,8 +810,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -856,8 +857,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -946,16 +947,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1025,8 +1026,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1046,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=
google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1084,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1106,8 +1107,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1119,8 +1120,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -38,10 +38,10 @@ func (ls Labels) Bytes(buf []byte) []byte {
b.WriteByte(labelSep)
for i, l := range ls {
if i > 0 {
b.WriteByte(seps[0])
b.WriteByte(sep)
}
b.WriteString(l.Name)
b.WriteByte(seps[0])
b.WriteByte(sep)
b.WriteString(l.Value)
}
return b.Bytes()
@ -86,9 +86,9 @@ func (ls Labels) Hash() uint64 {
}
b = append(b, v.Name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, v.Value...)
b = append(b, seps[0])
b = append(b, sep)
}
return xxhash.Sum64(b)
}
@ -106,9 +106,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
i++
default:
b = append(b, ls[i].Name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, ls[i].Value...)
b = append(b, seps[0])
b = append(b, sep)
i++
j++
}
@ -130,9 +130,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue
}
b = append(b, ls[i].Name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, ls[i].Value...)
b = append(b, seps[0])
b = append(b, sep)
}
return xxhash.Sum64(b), b
}
@ -151,10 +151,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
i++
default:
if b.Len() > 1 {
b.WriteByte(seps[0])
b.WriteByte(sep)
}
b.WriteString(ls[i].Name)
b.WriteByte(seps[0])
b.WriteByte(sep)
b.WriteString(ls[i].Value)
i++
j++
@ -177,10 +177,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
continue
}
if b.Len() > 1 {
b.WriteByte(seps[0])
b.WriteByte(sep)
}
b.WriteString(ls[i].Name)
b.WriteByte(seps[0])
b.WriteByte(sep)
b.WriteString(ls[i].Value)
}
return b.Bytes()


@ -29,10 +29,11 @@ const (
BucketLabel = "le"
InstanceName = "instance"
labelSep = '\xfe'
labelSep = '\xfe' // Used at beginning of `Bytes` return.
sep = '\xff' // Used between labels in `Bytes` and `Hash`.
)
var seps = []byte{'\xff'}
var seps = []byte{sep} // Used with Hash, which has no WriteByte method.
// Label is a key/value pair of strings.
type Label struct {

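As an aside, the byte layout these two separators produce is easiest to see in a standalone sketch. This is not part of the diff; the constants mirror the ones above, and the label set is invented for illustration:

package main

import "fmt"

const (
	labelSep = '\xfe' // marks the start of a Bytes-encoded label set
	sep      = '\xff' // separates names from values and pairs from each other
)

func main() {
	// Hypothetical label set, used only to show the encoding.
	ls := [][2]string{{"env", "prod"}, {"job", "api"}}

	buf := []byte{labelSep}
	for i, l := range ls {
		if i > 0 {
			buf = append(buf, sep)
		}
		buf = append(buf, l[0]...)
		buf = append(buf, sep)
		buf = append(buf, l[1]...)
	}
	// Prints: fe 65 6e 76 ff 70 72 6f 64 ff 6a 6f 62 ff 61 70 69
	// i.e. labelSep, then name/value pairs joined by sep.
	fmt.Printf("% x\n", buf)
}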

@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte {
b := bytes.NewBuffer(buf[:0])
for i := 0; i < len(ls.data); {
if i > 0 {
b.WriteByte(seps[0])
b.WriteByte(sep)
}
var name, value string
name, i = decodeString(ls.syms, ls.data, i)
value, i = decodeString(ls.syms, ls.data, i)
b.WriteString(name)
b.WriteByte(seps[0])
b.WriteByte(sep)
b.WriteString(value)
}
return b.Bytes()
@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 {
}
b = append(b, name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, value...)
b = append(b, seps[0])
b = append(b, sep)
pos = newPos
}
return xxhash.Sum64(b)
@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
}
if name == names[j] {
b = append(b, name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, value...)
b = append(b, seps[0])
b = append(b, sep)
}
}
@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue
}
b = append(b, name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, value...)
b = append(b, seps[0])
b = append(b, sep)
}
return xxhash.Sum64(b), b
}
@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte {
}
if lName == names[j] {
if b.Len() > 1 {
b.WriteByte(seps[0])
b.WriteByte(sep)
}
b.WriteString(lName)
b.WriteByte(seps[0])
b.WriteByte(sep)
b.WriteString(lValue)
}
pos = newPos
@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
}
if j == len(names) || lName != names[j] {
if b.Len() > 1 {
b.WriteByte(seps[0])
b.WriteByte(sep)
}
b.WriteString(lName)
b.WriteByte(seps[0])
b.WriteByte(sep)
b.WriteString(lValue)
}
pos = newPos


@ -112,9 +112,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
}
if name == names[j] {
b = append(b, name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, value...)
b = append(b, seps[0])
b = append(b, sep)
}
}
@ -138,9 +138,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
continue
}
b = append(b, name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, value...)
b = append(b, seps[0])
b = append(b, sep)
}
return xxhash.Sum64(b), b
}


@ -28,7 +28,7 @@ const (
maxSetMatches = 256
// The minimum number of alternate values a regex should have to trigger
// the optimization done by optimizeEqualStringMatchers() and so use a map
// the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map
// to match values instead of iterating over a list. This value has
// been computed running BenchmarkOptimizeEqualOrPrefixStringMatchers.
minEqualMultiStringMatcherMapThreshold = 16
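From the caller's side this threshold is invisible but observable in behavior. A hedged usage sketch, assuming the package's exported NewFastRegexMatcher/MatchString API and that sixteen alternates is still the crossover point:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Build an alternation with enough values (>= 16) that the map-based
	// matcher should kick in instead of a linear scan over the values.
	alts := make([]string, 0, 16)
	for i := 0; i < 16; i++ {
		alts = append(alts, fmt.Sprintf("value-%02d", i))
	}
	m, err := labels.NewFastRegexMatcher(strings.Join(alts, "|"))
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("value-07")) // true, resolved by a map lookup
	fmt.Println(m.MatchString("value-99")) // false
}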
@ -337,7 +337,7 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
return nil, nil
}
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0)
for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
// Split the string into the next literal and the remainder
@ -412,7 +412,7 @@ func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
clearBeginEndText(re)
m := stringMatcherFromRegexpInternal(re)
m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
return m
}
@ -549,11 +549,7 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
// Right matcher with 1 fixed set match.
case left == nil && len(matches) == 1:
return &literalPrefixStringMatcher{
prefix: matches[0],
prefixCaseSensitive: matchesCaseSensitive,
right: right,
}
return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right)
// Left matcher with 1 fixed set match.
case right == nil && len(matches) == 1:
@ -631,21 +627,47 @@ func (m *containsStringMatcher) Matches(s string) bool {
return false
}
// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
type literalPrefixStringMatcher struct {
prefix string
prefixCaseSensitive bool
func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher {
if prefixCaseSensitive {
return &literalPrefixSensitiveStringMatcher{
prefix: prefix,
right: right,
}
}
return &literalPrefixInsensitiveStringMatcher{
prefix: prefix,
right: right,
}
}
// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher.
type literalPrefixSensitiveStringMatcher struct {
prefix string
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *literalPrefixStringMatcher) Matches(s string) bool {
// Ensure the prefix matches.
if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool {
if !strings.HasPrefix(s, m.prefix) {
return false
}
if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
// Ensure the right side matches.
return m.right.Matches(s[len(m.prefix):])
}
// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher.
type literalPrefixInsensitiveStringMatcher struct {
prefix string
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool {
if !hasPrefixCaseInsensitive(s, m.prefix) {
return false
}
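The contract of this split is easiest to state as a short sketch, written as it could appear in this package's own tests (the prefixes are invented; trueMatcher, defined further down in this file, stands in for the right-hand side):

// The factory picks a concrete type once, so Matches no longer branches
// on prefixCaseSensitive for every call.
m1 := newLiteralPrefixStringMatcher("foo-", true, trueMatcher{})
m1.Matches("foo-bar") // true: case-sensitive prefix check, then right side
m1.Matches("FOO-bar") // false: prefix case does not match

m2 := newLiteralPrefixStringMatcher("foo-", false, trueMatcher{})
m2.Matches("FOO-bar") // true: case-insensitive prefix check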
@ -710,17 +732,20 @@ func (m *equalStringMatcher) Matches(s string) bool {
type multiStringMatcherBuilder interface {
StringMatcher
add(s string)
addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
setMatches() []string
}
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
// If the estimated size is low enough, it's faster to use a slice instead of a map.
if estimatedSize < minEqualMultiStringMatcherMapThreshold {
if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
}
return &equalMultiStringMapMatcher{
values: make(map[string]struct{}, estimatedSize),
prefixes: make(map[string][]StringMatcher, estimatedPrefixes),
minPrefixLen: minPrefixLength,
caseSensitive: caseSensitive,
}
}
@ -736,6 +761,10 @@ func (m *equalMultiStringSliceMatcher) add(s string) {
m.values = append(m.values, s)
}
func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
panic("not implemented")
}
func (m *equalMultiStringSliceMatcher) setMatches() []string {
return m.values
}
@ -757,12 +786,17 @@ func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
return false
}
// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
// equalMultiStringMapMatcher matches a string exactly against a map of valid values
// or against a set of prefix matchers.
type equalMultiStringMapMatcher struct {
// values contains values to match a string against. If the matching is case insensitive,
// the values here must be lowercase.
values map[string]struct{}
// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string.
// If the matching is case insensitive, prefixes are all lowercase.
prefixes map[string][]StringMatcher
// minPrefixLen can be zero, meaning there are no prefix matchers.
minPrefixLen int
caseSensitive bool
}
@ -774,8 +808,27 @@ func (m *equalMultiStringMapMatcher) add(s string) {
m.values[s] = struct{}{}
}
func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
if m.minPrefixLen == 0 {
panic("addPrefix called when no prefix length defined")
}
if len(prefix) < m.minPrefixLen {
panic("addPrefix called with a too short prefix")
}
if m.caseSensitive != prefixCaseSensitive {
panic("addPrefix called with a prefix whose case sensitivity is different than the expected one")
}
s := prefix[:m.minPrefixLen]
if !m.caseSensitive {
s = strings.ToLower(s)
}
m.prefixes[s] = append(m.prefixes[s], matcher)
}
func (m *equalMultiStringMapMatcher) setMatches() []string {
if len(m.values) >= maxSetMatches {
if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
return nil
}
@ -791,8 +844,17 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool {
s = toNormalisedLower(s)
}
_, ok := m.values[s]
return ok
if _, ok := m.values[s]; ok {
return true
}
if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
if matcher.Matches(s) {
return true
}
}
}
return false
}
// toNormalisedLower normalises the input string using "Unicode Normalization Form D" and then converts
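Tying the new fields together, a sketch of the two-step lookup above, again as it could appear in this package's tests; the values and prefixes are invented:

// Case sensitive, 1 exact value, 2 prefixes, all prefixes 4 bytes long.
m := newEqualMultiStringMatcher(true, 1, 2, 4)
m.add("exact")
m.addPrefix("api-", true, &literalPrefixSensitiveStringMatcher{prefix: "api-", right: trueMatcher{}})
m.addPrefix("web-", true, &literalPrefixSensitiveStringMatcher{prefix: "web-", right: trueMatcher{}})

m.Matches("exact")   // true: found in the values map
m.Matches("api-foo") // true: bucket "api-" hit, then the full prefix matcher runs
m.Matches("apx-foo") // false: neither an exact value nor a known 4-byte prefix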
@ -875,20 +937,24 @@ func (m trueMatcher) Matches(_ string) bool {
return true
}
// optimizeEqualStringMatchers optimize a specific case where all matchers are made by an
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In
// this specific case, when we have many strings to match against we can use a map instead
// optimizeEqualOrPrefixStringMatchers optimizes a specific case where all matchers are made of an
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
//
// In this specific case, when we have many strings to match against, we can use a map instead
// of iterating over the list of strings.
func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
var (
caseSensitive bool
caseSensitiveSet bool
numValues int
numPrefixes int
minPrefixLength int
)
// Analyse the input StringMatcher to count the number of occurrences
// and ensure all of them have the same case sensitivity.
analyseCallback := func(matcher *equalStringMatcher) bool {
analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
// Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
return false
@ -901,34 +967,55 @@ func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatch
return true
}
if !findEqualStringMatchers(input, analyseCallback) {
analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
// Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
return false
} else if !caseSensitiveSet {
caseSensitive = prefixCaseSensitive
caseSensitiveSet = true
}
if numPrefixes == 0 || len(prefix) < minPrefixLength {
minPrefixLength = len(prefix)
}
numPrefixes++
return true
}
if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
return input
}
// If the number of values found is less than the threshold, then we should skip the optimization.
if numValues < threshold {
// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
if (numValues + numPrefixes) < threshold {
return input
}
// Parse the input StringMatcher again to extract all values and store them.
// We can skip the case sensitivity check because we've already checked it;
// if the code reaches this point, all matchers have the same case sensitivity.
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
// Ignore the return value because we already iterated over the input StringMatcher
// and it was all good.
findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
multiMatcher.add(matcher.s)
return true
}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
multiMatcher.addPrefix(prefix, caseSensitive, matcher)
return true
})
return multiMatcher
}
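End to end, a sketch of what the rewrite produces for a mixed alternation; it mirrors the "mixed values and prefixes" test case further down and uses the unexported types directly:

input := orStringMatcher{
	&equalStringMatcher{s: "foo", caseSensitive: true},
	&literalPrefixSensitiveStringMatcher{prefix: "bar-", right: trueMatcher{}},
}

// Threshold 0 forces the rewrite regardless of how few matchers there are.
optimized := optimizeEqualOrPrefixStringMatchers(input, 0)

// optimized is now an *equalMultiStringMapMatcher holding values {"foo"}
// and prefixes {"bar-": ...}, with minPrefixLen 4 (the shortest prefix seen).
optimized.Matches("foo")     // true: plain map lookup
optimized.Matches("bar-123") // true: prefix bucket, then the prefix matcher
optimized.Matches("baz")     // false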
// findEqualStringMatchers analyze the input StringMatcher and calls the callback for each
// equalStringMatcher found. Returns true if and only if the input StringMatcher is *only*
// composed by an alternation of equalStringMatcher.
func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
// findEqualOrPrefixStringMatchers analyzes the input StringMatcher and calls equalMatcherCallback for each
// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
//
// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or
// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
orInput, ok := input.(orStringMatcher)
if !ok {
return false
@ -937,17 +1024,27 @@ func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalSt
for _, m := range orInput {
switch casted := m.(type) {
case orStringMatcher:
if !findEqualStringMatchers(m, callback) {
if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
return false
}
case *equalStringMatcher:
if !callback(casted) {
if !equalMatcherCallback(casted) {
return false
}
case *literalPrefixSensitiveStringMatcher:
if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
return false
}
case *literalPrefixInsensitiveStringMatcher:
if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
return false
}
default:
// It's not an equal string matcher, so we have to stop searching
// It's not an equal or prefix string matcher, so we have to stop searching
// because this optimization can't be applied.
return false
}


@ -71,6 +71,8 @@ var (
// A long case insensitive alternation.
"(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))",
"(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))",
// A short case insensitive alternation where each entry ends with ".*".
"(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*))",
// A long case insensitive alternation where each entry ends with ".*".
"(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*|qbvKMimS.*|IecrXtPa.*|seTckYqt.*|NxnyHkgB.*|fIDlOgKb.*|UhlWIygH.*|OtNoJxHG.*|cUTkFVIV.*|mTgFIHjr.*|jQkoIDtE.*|PPMKxRXl.*|AwMfwVkQ.*|CQyMrTQJ.*|BzrqxVSi.*|nTpcWuhF.*|PertdywG.*|ZZDgCtXN.*|WWdDPyyE.*|uVtNQsKk.*|BdeCHvPZ.*|wshRnFlH.*|aOUIitIp.*|RxZeCdXT.*|CFZMslCj.*|AVBZRDxl.*|IzIGCnhw.*|ythYuWiz.*|oztXVXhl.*|VbLkwqQx.*|qvaUgyVC.*|VawUjPWC.*|ecloYJuj.*|boCLTdSU.*|uPrKeAZx.*|hrMWLWBq.*|JOnUNHRM.*|rYnujkPq.*|dDEdZhIj.*|DRrfvugG.*|yEGfDxVV.*|YMYdJWuP.*|PHUQZNWM.*|AmKNrLis.*|zTxndVfn.*|FPsHoJnc.*|EIulZTua.*|KlAPhdzg.*|ScHJJCLt.*|NtTfMzME.*|eMCwuFdo.*|SEpJVJbR.*|cdhXZeCx.*|sAVtBwRh.*|kVFEVcMI.*|jzJrxraA.*|tGLHTell.*|NNWoeSaw.*|DcOKSetX.*|UXZAJyka.*|THpMphDP.*|rizheevl.*|kDCBRidd.*|pCZZRqyu.*|pSygkitl.*|SwZGkAaW.*|wILOrfNX.*|QkwVOerj.*|kHOMxPDr.*|EwOVycJv.*|AJvtzQFS.*|yEOjKYYB.*|LizIINLL.*|JBRSsfcG.*|YPiUqqNl.*|IsdEbvee.*|MjEpGcBm.*|OxXZVgEQ.*|xClXGuxa.*|UzRCGFEb.*|buJbvfvA.*|IPZQxRet.*|oFYShsMc.*|oBHffuHO.*|bzzKrcBR.*|KAjzrGCl.*|IPUsAVls.*|OGMUMbIU.*|gyDccHuR.*|bjlalnDd.*|ZLWjeMna.*|fdsuIlxQ.*|dVXtiomV.*|XxedTjNg.*|XWMHlNoA.*|nnyqArQX.*|opfkWGhb.*|wYtnhdYb.*))",
// A long case insensitive alternation where each entry starts with ".*".
@ -376,7 +378,7 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{"10\\.0\\.(1|2)\\.+", nil},
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}},
{"foo-.*$", &literalPrefixStringMatcher{prefix: "foo-", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}},
{"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}},
{"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})},
{"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})},
@ -391,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{".*foo.*bar.*", nil},
{`\d*`, nil},
{".", nil},
{"/|/bar.*", &literalPrefixStringMatcher{prefix: "/", prefixCaseSensitive: true, right: orStringMatcher{emptyStringMatcher{}, &literalPrefixStringMatcher{prefix: "bar", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}},
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}},
// This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat.
// Handling it would make the code too complex.
{"(.+)/(foo.*|bar$)", nil},
// Case sensitive alternate with same literal prefix and .* suffix.
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "xyz-016a-ixb-", prefixCaseSensitive: true, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "dp", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "op", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}},
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}},
// Case insensitive alternate with same literal prefix and .* suffix.
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
// Concatenated variable length selectors are not supported.
{"foo.*.*", nil},
{"foo.+.+", nil},
@ -408,9 +410,9 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{"aaa.?.?", nil},
{"aaa.?.*", nil},
// Regexps with ".?".
{"ext.?|xfs", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"foo.?", &literalPrefixStringMatcher{prefix: "foo", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}},
{"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}},
{"f.?o", nil},
} {
c := c
@ -480,10 +482,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
re := regexp.MustCompile("^" + c.pattern + "$")
// Pre-condition check: ensure it contains literalPrefixStringMatcher.
// Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher.
numPrefixMatchers := 0
visitStringMatcher(matcher, func(matcher StringMatcher) {
if _, ok := matcher.(*literalPrefixStringMatcher); ok {
if _, ok := matcher.(*literalPrefixSensitiveStringMatcher); ok {
numPrefixMatchers++
}
if _, ok := matcher.(*literalPrefixInsensitiveStringMatcher); ok {
numPrefixMatchers++
}
})
@ -683,7 +688,15 @@ func randStrings(randGenerator *rand.Rand, many, length int) []string {
return out
}
func TestOptimizeEqualStringMatchers(t *testing.T) {
func randStringsWithSuffix(randGenerator *rand.Rand, many, length int, suffix string) []string {
out := randStrings(randGenerator, many, length)
for i := range out {
out[i] += suffix
}
return out
}
func TestOptimizeEqualOrPrefixStringMatchers(t *testing.T) {
tests := map[string]struct {
input StringMatcher
expectedValues []string
@ -764,7 +777,7 @@ func TestOptimizeEqualStringMatchers(t *testing.T) {
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
actualMatcher := optimizeEqualStringMatchers(testData.input, 0)
actualMatcher := optimizeEqualOrPrefixStringMatchers(testData.input, 0)
if testData.expectedValues == nil {
require.IsType(t, testData.input, actualMatcher)
@ -779,10 +792,12 @@ func TestOptimizeEqualStringMatchers(t *testing.T) {
func TestNewEqualMultiStringMatcher(t *testing.T) {
tests := map[string]struct {
values []string
caseSensitive bool
expectedValuesMap map[string]struct{}
expectedValuesList []string
values []string
caseSensitivePrefixes []*literalPrefixSensitiveStringMatcher
caseSensitive bool
expectedValuesMap map[string]struct{}
expectedPrefixesMap map[string][]StringMatcher
expectedValuesList []string
}{
"few case sensitive values": {
values: []string{"a", "B"},
@ -794,27 +809,47 @@ func TestNewEqualMultiStringMatcher(t *testing.T) {
caseSensitive: false,
expectedValuesList: []string{"a", "B"},
},
"few case sensitive values and prefixes": {
values: []string{"a"},
caseSensitivePrefixes: []*literalPrefixSensitiveStringMatcher{{prefix: "B", right: anyStringWithoutNewlineMatcher{}}},
caseSensitive: true,
expectedValuesMap: map[string]struct{}{"a": {}},
expectedPrefixesMap: map[string][]StringMatcher{"B": {&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}},
},
"many case sensitive values": {
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
caseSensitive: true,
expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}},
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
caseSensitive: true,
expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}},
expectedPrefixesMap: map[string][]StringMatcher{},
},
"many case insensitive values": {
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
caseSensitive: false,
expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}},
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
caseSensitive: false,
expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}},
expectedPrefixesMap: map[string][]StringMatcher{},
},
}
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values))
// To keep this test simple, we always assume a min prefix length of 1.
minPrefixLength := 0
if len(testData.caseSensitivePrefixes) > 0 {
minPrefixLength = 1
}
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.caseSensitivePrefixes), minPrefixLength)
for _, v := range testData.values {
matcher.add(v)
}
if testData.expectedValuesMap != nil {
for _, p := range testData.caseSensitivePrefixes {
matcher.addPrefix(p.prefix, true, p)
}
if testData.expectedValuesMap != nil || testData.expectedPrefixesMap != nil {
require.IsType(t, &equalMultiStringMapMatcher{}, matcher)
require.Equal(t, testData.expectedValuesMap, matcher.(*equalMultiStringMapMatcher).values)
require.Equal(t, testData.expectedPrefixesMap, matcher.(*equalMultiStringMapMatcher).prefixes)
require.Equal(t, testData.caseSensitive, matcher.(*equalMultiStringMapMatcher).caseSensitive)
}
if testData.expectedValuesList != nil {
@ -826,9 +861,32 @@ func TestNewEqualMultiStringMatcher(t *testing.T) {
}
}
func TestEqualMultiStringMapMatcher_addPrefix(t *testing.T) {
t.Run("should panic if the matcher is case sensitive but the prefix is not case sensitive", func(t *testing.T) {
matcher := newEqualMultiStringMatcher(true, 0, 1, 1)
require.Panics(t, func() {
matcher.addPrefix("a", false, &literalPrefixInsensitiveStringMatcher{
prefix: "a",
})
})
})
t.Run("should panic if the matcher is not case sensitive but the prefix is case sensitive", func(t *testing.T) {
matcher := newEqualMultiStringMatcher(false, 0, 1, 1)
require.Panics(t, func() {
matcher.addPrefix("a", true, &literalPrefixSensitiveStringMatcher{
prefix: "a",
})
})
})
}
func TestEqualMultiStringMatcher_Matches(t *testing.T) {
tests := map[string]struct {
values []string
prefixes []StringMatcher
caseSensitive bool
expectedMatches []string
expectedNotMatches []string
@ -845,6 +903,24 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) {
expectedMatches: []string{"a", "A", "b", "B"},
expectedNotMatches: []string{"c", "C"},
},
"few case sensitive prefixes": {
prefixes: []StringMatcher{
&literalPrefixSensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}},
&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}},
},
caseSensitive: true,
expectedMatches: []string{"a", "aX", "B", "BX"},
expectedNotMatches: []string{"A", "b"},
},
"few case insensitive prefixes": {
prefixes: []StringMatcher{
&literalPrefixInsensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}},
&literalPrefixInsensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}},
},
caseSensitive: false,
expectedMatches: []string{"a", "aX", "A", "AX", "b", "bX", "B", "BX"},
expectedNotMatches: []string{"c", "cX", "C", "CX"},
},
"many case sensitive values": {
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
caseSensitive: true,
@ -857,14 +933,37 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) {
expectedMatches: []string{"a", "A", "b", "B"},
expectedNotMatches: []string{"x", "X"},
},
"mixed values and prefixes": {
values: []string{"a"},
prefixes: []StringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}},
caseSensitive: true,
expectedMatches: []string{"a", "B", "BX"},
expectedNotMatches: []string{"aX", "A", "b", "bX"},
},
}
for testName, testData := range tests {
t.Run(testName, func(t *testing.T) {
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values))
// To keep this test simple, we always assume a min prefix length of 1.
minPrefixLength := 0
if len(testData.prefixes) > 0 {
minPrefixLength = 1
}
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.prefixes), minPrefixLength)
for _, v := range testData.values {
matcher.add(v)
}
for _, p := range testData.prefixes {
switch m := p.(type) {
case *literalPrefixSensitiveStringMatcher:
matcher.addPrefix(m.prefix, true, p)
case *literalPrefixInsensitiveStringMatcher:
matcher.addPrefix(m.prefix, false, p)
default:
panic("Unexpected type in test case")
}
}
for _, v := range testData.expectedMatches {
require.True(t, matcher.Matches(v), "value: %s", v)
@ -876,29 +975,33 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) {
}
}
func TestFindEqualStringMatchers(t *testing.T) {
func TestFindEqualOrPrefixStringMatchers(t *testing.T) {
type match struct {
s string
caseSensitive bool
}
// Utility to call findEqualStringMatchers() and collect all callback invocations.
findEqualStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) {
ok = findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
// Utility to call findEqualOrPrefixStringMatchers() and collect all callback invocations.
findEqualOrPrefixStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) {
ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
matches = append(matches, match{matcher.s, matcher.caseSensitive})
return true
}, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool {
matches = append(matches, match{prefix, prefixCaseSensitive})
return true
})
return
}
t.Run("empty matcher", func(t *testing.T) {
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(emptyStringMatcher{})
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(emptyStringMatcher{})
require.False(t, actualOk)
require.Empty(t, actualMatches)
})
t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) {
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&equalStringMatcher{s: "test-1", caseSensitive: true},
&equalStringMatcher{s: "test-2", caseSensitive: true},
@ -910,7 +1013,7 @@ func TestFindEqualStringMatchers(t *testing.T) {
})
t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) {
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&equalStringMatcher{s: "test-1", caseSensitive: false},
&equalStringMatcher{s: "test-2", caseSensitive: false},
@ -922,7 +1025,7 @@ func TestFindEqualStringMatchers(t *testing.T) {
})
t.Run("concat of literal matchers (mixed case)", func(t *testing.T) {
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&equalStringMatcher{s: "test-1", caseSensitive: false},
&equalStringMatcher{s: "test-2", caseSensitive: true},
@ -932,11 +1035,59 @@ func TestFindEqualStringMatchers(t *testing.T) {
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
})
t.Run("concat of literal prefix matchers (case sensitive)", func(t *testing.T) {
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&literalPrefixSensitiveStringMatcher{prefix: "test-1"},
&literalPrefixSensitiveStringMatcher{prefix: "test-2"},
},
)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
})
t.Run("concat of literal prefix matchers (case insensitive)", func(t *testing.T) {
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&literalPrefixInsensitiveStringMatcher{prefix: "test-1"},
&literalPrefixInsensitiveStringMatcher{prefix: "test-2"},
},
)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches)
})
t.Run("concat of literal prefix matchers (mixed case)", func(t *testing.T) {
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&literalPrefixInsensitiveStringMatcher{prefix: "test-1"},
&literalPrefixSensitiveStringMatcher{prefix: "test-2"},
},
)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
})
t.Run("concat of literal string and prefix matchers (case sensitive)", func(t *testing.T) {
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
orStringMatcher{
&equalStringMatcher{s: "test-1", caseSensitive: true},
&literalPrefixSensitiveStringMatcher{prefix: "test-2"},
},
)
require.True(t, actualOk)
require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
})
}
// This benchmark is used to find a good threshold for applying the optimization
// done by optimizeEqualStringMatchers().
func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
// done by optimizeEqualOrPrefixStringMatchers().
func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) {
randGenerator := rand.New(rand.NewSource(time.Now().UnixNano()))
// Generate variable lengths random texts to match against.
@@ -946,42 +1097,51 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
for numAlternations := 2; numAlternations <= 256; numAlternations *= 2 {
for _, caseSensitive := range []bool{true, false} {
b.Run(fmt.Sprintf("alternations: %d case sensitive: %t", numAlternations, caseSensitive), func(b *testing.B) {
// Generate a regex with the expected number of alternations.
re := strings.Join(randStrings(randGenerator, numAlternations, 10), "|")
if !caseSensitive {
re = "(?i:(" + re + "))"
}
parsed, err := syntax.Parse(re, syntax.Perl)
require.NoError(b, err)
unoptimized := stringMatcherFromRegexpInternal(parsed)
require.IsType(b, orStringMatcher{}, unoptimized)
optimized := optimizeEqualStringMatchers(unoptimized, 0)
if numAlternations < minEqualMultiStringMatcherMapThreshold {
require.IsType(b, &equalMultiStringSliceMatcher{}, optimized)
} else {
require.IsType(b, &equalMultiStringMapMatcher{}, optimized)
}
b.Run("without optimizeEqualStringMatchers()", func(b *testing.B) {
for n := 0; n < b.N; n++ {
for _, t := range texts {
unoptimized.Matches(t)
}
for _, prefixMatcher := range []bool{true, false} {
b.Run(fmt.Sprintf("alternations: %d case sensitive: %t prefix matcher: %t", numAlternations, caseSensitive, prefixMatcher), func(b *testing.B) {
// When benchmarking prefix matchers, we append a wildcard suffix so that each literal becomes a prefix.
suffix := ""
if prefixMatcher {
suffix = ".*"
}
})
b.Run("with optimizeEqualStringMatchers()", func(b *testing.B) {
for n := 0; n < b.N; n++ {
for _, t := range texts {
optimized.Matches(t)
}
// Generate a regex with the expected number of alternations.
re := strings.Join(randStringsWithSuffix(randGenerator, numAlternations, 10, suffix), "|")
if !caseSensitive {
re = "(?i:(" + re + "))"
}
b.Logf("regexp: %s", re)
parsed, err := syntax.Parse(re, syntax.Perl)
require.NoError(b, err)
unoptimized := stringMatcherFromRegexpInternal(parsed)
require.IsType(b, orStringMatcher{}, unoptimized)
optimized := optimizeEqualOrPrefixStringMatchers(unoptimized, 0)
if numAlternations < minEqualMultiStringMatcherMapThreshold && !prefixMatcher {
require.IsType(b, &equalMultiStringSliceMatcher{}, optimized)
} else {
require.IsType(b, &equalMultiStringMapMatcher{}, optimized)
}
b.Run("without optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) {
for n := 0; n < b.N; n++ {
for _, t := range texts {
unoptimized.Matches(t)
}
}
})
b.Run("with optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) {
for n := 0; n < b.N; n++ {
for _, t := range texts {
optimized.Matches(t)
}
}
})
})
})
}
}
}
}
@@ -1074,20 +1234,14 @@ func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) {
}
}
func TestLiteralPrefixStringMatcher(t *testing.T) {
m := &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &emptyStringMatcher{}}
func TestLiteralPrefixSensitiveStringMatcher(t *testing.T) {
m := &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}}
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.False(t, m.Matches("mAr"))
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: false, right: &emptyStringMatcher{}}
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.True(t, m.Matches("mAr"))
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &equalStringMatcher{s: "co", caseSensitive: false}}
m = &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &equalStringMatcher{s: "co", caseSensitive: false}}
require.True(t, m.Matches("marco"))
require.True(t, m.Matches("marCO"))
require.False(t, m.Matches("MARco"))
@@ -1095,6 +1249,14 @@ func TestLiteralPrefixStringMatcher(t *testing.T) {
require.False(t, m.Matches("marcopracucci"))
}
func TestLiteralPrefixInsensitiveStringMatcher(t *testing.T) {
m := &literalPrefixInsensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}}
require.True(t, m.Matches("mar"))
require.False(t, m.Matches("marco"))
require.False(t, m.Matches("ma"))
require.True(t, m.Matches("mAr"))
}
func TestLiteralSuffixStringMatcher(t *testing.T) {
m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true}
require.True(t, m.Matches("co"))
@@ -1184,7 +1346,10 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch
visitStringMatcher(casted.right, callback)
}
case *literalPrefixStringMatcher:
case *literalPrefixSensitiveStringMatcher:
visitStringMatcher(casted.right, callback)
case *literalPrefixInsensitiveStringMatcher:
visitStringMatcher(casted.right, callback)
case *literalSuffixStringMatcher:
@@ -1196,10 +1361,16 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch
}
// No nested matchers for the following ones.
case *equalMultiStringMapMatcher:
for _, prefixes := range casted.prefixes {
for _, matcher := range prefixes {
visitStringMatcher(matcher, callback)
}
}
case emptyStringMatcher:
case *equalStringMatcher:
case *equalMultiStringSliceMatcher:
case *equalMultiStringMapMatcher:
case anyStringWithoutNewlineMatcher:
case *anyNonEmptyStringMatcher:
case trueMatcher:

View file

@@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 {
}
b = append(b, v.Name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, v.Value...)
b = append(b, seps[0])
b = append(b, sep)
}
return xxhash.Sum64(b)
}

View file

@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
}
b = append(b, name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, value...)
b = append(b, seps[0])
b = append(b, sep)
pos = newPos
}
return xxhash.Sum64(b)

View file

@@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 {
}
b = append(b, v.Name...)
b = append(b, seps[0])
b = append(b, sep)
b = append(b, v.Value...)
b = append(b, seps[0])
b = append(b, sep)
}
if h != nil {
return h.Sum64()

prompb/codec.go Normal file
View file

@@ -0,0 +1,201 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prompb
import (
"strings"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
)
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
// ToLabels return model labels.Labels from timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
return labelProtosToLabels(b, m.GetLabels())
}
// ToLabels returns model labels.Labels from timeseries' remote labels.
func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
return labelProtosToLabels(b, m.GetLabels())
}
func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
b.Reset()
for _, l := range labelPairs {
b.Add(l.Name, l.Value)
}
b.Sort()
return b.Labels()
}
// FromLabels transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func FromLabels(lbls labels.Labels, buf []Label) []Label {
result := buf[:0]
lbls.Range(func(l labels.Label) {
result = append(result, Label{
Name: l.Name,
Value: l.Value,
})
})
return result
}
// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string, we need to transform it to an enum.
func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
mt := strings.ToUpper(string(t))
v, ok := MetricMetadata_MetricType_value[mt]
if !ok {
return MetricMetadata_UNKNOWN
}
return MetricMetadata_MetricType(v)
}
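// As an illustrative example (mirroring TestFromMetadataType in rwcommon): the
// model type "counter" upper-cases to "COUNTER" and maps to
// MetricMetadata_COUNTER, while an unrecognized string such as "not-known"
// falls back to MetricMetadata_UNKNOWN.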
// IsFloatHistogram returns true if the histogram is float.
func (h Histogram) IsFloatHistogram() bool {
_, ok := h.GetCount().(*Histogram_CountFloat)
return ok
}
// ToIntHistogram returns an integer Prometheus histogram from the remote
// representation of an integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
if h.IsFloatHistogram() {
return nil
}
return &histogram.Histogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountInt(),
Count: h.GetCountInt(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveDeltas(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeDeltas(),
}
}
// ToFloatHistogram returns a float Prometheus histogram from the remote
// representation of a float histogram. If the underlying representation is an
// integer histogram, a conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
if h.IsFloatHistogram() {
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountFloat(),
Count: h.GetCountFloat(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeCounts(),
}
}
// Conversion from integer histogram.
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: float64(h.GetZeroCountInt()),
Count: float64(h.GetCountInt()),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
}
}
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func deltasToCounts(deltas []int64) []float64 {
counts := make([]float64, len(deltas))
var cur float64
for i, d := range deltas {
cur += float64(d)
counts[i] = cur
}
return counts
}
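// A hedged worked example of the delta decoding above: the integer deltas
// [1, 2, -2, 1] accumulate to the absolute counts [1, 3, 1, 2]; each delta is
// added to the running total of the previous bucket.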
// FromIntHistogram returns remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
return Histogram{
Count: &Histogram_CountInt{CountInt: h.Count},
Sum: h.Sum,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
NegativeSpans: spansToSpansProto(h.NegativeSpans),
NegativeDeltas: h.NegativeBuckets,
PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets,
ResetHint: Histogram_ResetHint(h.CounterResetHint),
Timestamp: timestamp,
}
}
// FromFloatHistogram returns remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
return Histogram{
Count: &Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
Timestamp: timestamp,
}
}
func spansToSpansProto(s []histogram.Span) []BucketSpan {
spans := make([]BucketSpan, len(s))
for i := 0; i < len(s); i++ {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
// ToExemplar converts remote exemplar to model exemplar.
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar {
timestamp := m.Timestamp
return exemplar.Exemplar{
Labels: labelProtosToLabels(b, m.GetLabels()),
Value: m.Value,
Ts: timestamp,
HasTs: timestamp != 0,
}
}

View file

@@ -17,14 +17,6 @@ import (
"sync"
)
func (m Sample) T() int64 { return m.Timestamp }
func (m Sample) V() float64 { return m.Value }
func (h Histogram) IsFloatHistogram() bool {
_, ok := h.GetCount().(*Histogram_CountFloat)
return ok
}
func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
size := r.Size()
data, ok := p.Get().(*[]byte)

View file

@@ -0,0 +1,216 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
)
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
// ToLabels returns model labels.Labels from timeseries' remote labels.
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
}
// ToMetadata returns model metadata from timeseries' remote metadata.
func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata {
typ := model.MetricTypeUnknown
switch m.Metadata.Type {
case Metadata_METRIC_TYPE_COUNTER:
typ = model.MetricTypeCounter
case Metadata_METRIC_TYPE_GAUGE:
typ = model.MetricTypeGauge
case Metadata_METRIC_TYPE_HISTOGRAM:
typ = model.MetricTypeHistogram
case Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
typ = model.MetricTypeGaugeHistogram
case Metadata_METRIC_TYPE_SUMMARY:
typ = model.MetricTypeSummary
case Metadata_METRIC_TYPE_INFO:
typ = model.MetricTypeInfo
case Metadata_METRIC_TYPE_STATESET:
typ = model.MetricTypeStateset
}
return metadata.Metadata{
Type: typ,
Unit: symbols[m.Metadata.UnitRef],
Help: symbols[m.Metadata.HelpRef],
}
}
// FromMetadataType transforms a Prometheus metricType into writev2 metricType.
// Since the former is a string we need to transform it to an enum.
func FromMetadataType(t model.MetricType) Metadata_MetricType {
switch t {
case model.MetricTypeCounter:
return Metadata_METRIC_TYPE_COUNTER
case model.MetricTypeGauge:
return Metadata_METRIC_TYPE_GAUGE
case model.MetricTypeHistogram:
return Metadata_METRIC_TYPE_HISTOGRAM
case model.MetricTypeGaugeHistogram:
return Metadata_METRIC_TYPE_GAUGEHISTOGRAM
case model.MetricTypeSummary:
return Metadata_METRIC_TYPE_SUMMARY
case model.MetricTypeInfo:
return Metadata_METRIC_TYPE_INFO
case model.MetricTypeStateset:
return Metadata_METRIC_TYPE_STATESET
default:
return Metadata_METRIC_TYPE_UNSPECIFIED
}
}
// IsFloatHistogram returns true if the histogram is float.
func (h Histogram) IsFloatHistogram() bool {
_, ok := h.GetCount().(*Histogram_CountFloat)
return ok
}
// ToIntHistogram returns an integer Prometheus histogram from the remote
// representation of an integer histogram. If it's a float histogram, the method returns nil.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
if h.IsFloatHistogram() {
return nil
}
return &histogram.Histogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountInt(),
Count: h.GetCountInt(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveDeltas(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeDeltas(),
CustomValues: h.GetCustomValues(),
}
}
// ToFloatHistogram returns a float Prometheus histogram from the remote
// representation of a float histogram. If the underlying representation is an
// integer histogram, a conversion is performed.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
if h.IsFloatHistogram() {
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.GetZeroCountFloat(),
Count: h.GetCountFloat(),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: h.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeCounts(),
CustomValues: h.GetCustomValues(),
}
}
// Conversion from integer histogram.
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: float64(h.GetZeroCountInt()),
Count: float64(h.GetCountInt()),
Sum: h.Sum,
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
CustomValues: h.GetCustomValues(),
}
}
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func deltasToCounts(deltas []int64) []float64 {
counts := make([]float64, len(deltas))
var cur float64
for i, d := range deltas {
cur += float64(d)
counts[i] = cur
}
return counts
}
// FromIntHistogram returns remote Histogram from the integer Histogram.
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
return Histogram{
Count: &Histogram_CountInt{CountInt: h.Count},
Sum: h.Sum,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
NegativeSpans: spansToSpansProto(h.NegativeSpans),
NegativeDeltas: h.NegativeBuckets,
PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets,
ResetHint: Histogram_ResetHint(h.CounterResetHint),
CustomValues: h.CustomValues,
Timestamp: timestamp,
}
}
// FromFloatHistogram returns remote Histogram from the float Histogram.
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
return Histogram{
Count: &Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
CustomValues: fh.CustomValues,
Timestamp: timestamp,
}
}
func spansToSpansProto(s []histogram.Span) []BucketSpan {
spans := make([]BucketSpan, len(s))
for i := 0; i < len(s); i++ {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar {
timestamp := m.Timestamp
return exemplar.Exemplar{
Labels: desymbolizeLabels(b, m.LabelsRefs, symbols),
Value: m.Value,
Ts: timestamp,
HasTs: timestamp != 0,
}
}

View file

@ -0,0 +1,165 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"slices"
)
func (m Sample) T() int64 { return m.Timestamp }
func (m Sample) V() float64 { return m.Value }
func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) {
siz := m.Size()
if cap(dst) < siz {
dst = make([]byte, siz)
}
n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz])
if err != nil {
return nil, err
}
return dst[:n], nil
}
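// A minimal usage sketch (hedged; `requests` is a hypothetical slice of
// *Request values): the returned slice can be fed back in on the next call to
// amortize allocations, since a new buffer is only allocated when the request
// outgrows the previous one:
//
//	var buf []byte
//	for _, req := range requests {
//		buf, _ = req.OptimizedMarshal(buf)
//	}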
// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but calls OptimizedMarshalToSizedBuffer on the timeseries.
func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if len(m.Timeseries) > 0 {
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
}
if len(m.Symbols) > 0 {
for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
i -= len(m.Symbols[iNdEx])
copy(dAtA[i:], m.Symbols[iNdEx])
i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
i--
dAtA[i] = 0x22
}
}
return len(dAtA) - i, nil
}
// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
// but marshals m.LabelsRefs in place without extra allocations.
func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.CreatedTimestamp != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
i--
dAtA[i] = 0x30
}
{
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
if len(m.Histograms) > 0 {
for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if len(m.Exemplars) > 0 {
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
}
if len(m.Samples) > 0 {
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.LabelsRefs) > 0 {
// This is the trick: encode the varints in reverse order to make it easier
// to do it in place. Then reverse the whole thing.
var j10 int
start := i
for _, num := range m.LabelsRefs {
for num >= 1<<7 {
dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80)
num >>= 7
i--
j10++
}
dAtA[i-1] = uint8(num)
i--
j10++
}
slices.Reverse(dAtA[i:start])
// --- end of trick
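// A hedged worked example of the trick above: the varint encoding of 300 is
// 0xAC 0x02 (low 7 bits first, continuation bit set on all but the last byte).
// The loop walks the buffer backwards, so 300 lands in memory as 0x02 0xAC;
// slices.Reverse restores the wire order, and with multiple refs the same
// reversal also restores the order of the refs themselves.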
i = encodeVarintTypes(dAtA, i, uint64(j10))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}

View file

@@ -0,0 +1,97 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestOptimizedMarshal(t *testing.T) {
for _, tt := range []struct {
name string
m *Request
}{
{
name: "empty",
m: &Request{},
},
{
name: "simple",
m: &Request{
Timeseries: []TimeSeries{
{
LabelsRefs: []uint32{
0, 1,
2, 3,
4, 5,
6, 7,
8, 9,
10, 11,
12, 13,
14, 15,
},
Samples: []Sample{{Value: 1, Timestamp: 0}},
Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}},
Histograms: nil,
},
{
LabelsRefs: []uint32{
0, 1,
2, 3,
4, 5,
6, 7,
8, 9,
10, 11,
12, 13,
14, 15,
},
Samples: []Sample{{Value: 2, Timestamp: 1}},
Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}},
Histograms: nil,
},
},
Symbols: []string{
"a", "b",
"c", "d",
"e", "f",
"g", "h",
"i", "j",
"k", "l",
"m", "n",
"o", "p",
},
},
},
} {
t.Run(tt.name, func(t *testing.T) {
// Keep the slice allocated to mimic what std Marshal
// would give to sized Marshal.
got := make([]byte, 0)
// Should be the same as the standard marshal.
expected, err := tt.m.Marshal()
require.NoError(t, err)
got, err = tt.m.OptimizedMarshal(got)
require.NoError(t, err)
require.Equal(t, expected, got)
// Unmarshal should work too.
m := &Request{}
require.NoError(t, m.Unmarshal(got))
require.Equal(t, tt.m, m)
})
}
}

View file

@@ -0,0 +1,83 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import "github.com/prometheus/prometheus/model/labels"
// SymbolsTable implements a table for convenient symbol interning and reuse.
type SymbolsTable struct {
strings []string
symbolsMap map[string]uint32
}
// NewSymbolTable returns a symbol table.
func NewSymbolTable() SymbolsTable {
return SymbolsTable{
// Empty string is required as the first element.
symbolsMap: map[string]uint32{"": 0},
strings: []string{""},
}
}
// Symbolize adds (if not added before) a string to the symbols table,
// while returning its reference number.
func (t *SymbolsTable) Symbolize(str string) uint32 {
if ref, ok := t.symbolsMap[str]; ok {
return ref
}
ref := uint32(len(t.strings))
t.strings = append(t.strings, str)
t.symbolsMap[str] = ref
return ref
}
// SymbolizeLabels symbolizes Prometheus labels.
func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
result := buf[:0]
lbls.Range(func(l labels.Label) {
off := t.Symbolize(l.Name)
result = append(result, off)
off = t.Symbolize(l.Value)
result = append(result, off)
})
return result
}
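// A hedged sketch with a fresh table: the labels {__name__="up", job="api"}
// symbolize to the reference pairs [1, 2, 3, 4] over the symbols
// ["", "__name__", "up", "job", "api"]; names and values interleave, and
// reference 0 stays reserved for the mandatory empty string.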
// Symbols returns the computed symbols table to put in e.g. Request.Symbols.
// As per spec, order does not matter.
func (t *SymbolsTable) Symbols() []string {
return t.strings
}
// Reset clears symbols table.
func (t *SymbolsTable) Reset() {
// NOTE: Make sure to keep empty symbol.
t.strings = t.strings[:1]
for k := range t.symbolsMap {
if k == "" {
continue
}
delete(t.symbolsMap, k)
}
}
// desymbolizeLabels decodes label references into labels, using the given symbols table.
func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
b.Reset()
for i := 0; i < len(labelRefs); i += 2 {
b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
}
b.Sort()
return b.Labels()
}

View file

@@ -0,0 +1,60 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
)
func TestSymbolsTable(t *testing.T) {
s := NewSymbolTable()
require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
require.Equal(t, uint32(0), s.Symbolize(""))
require.Equal(t, []string{""}, s.Symbols())
require.Equal(t, uint32(1), s.Symbolize("abc"))
require.Equal(t, []string{"", "abc"}, s.Symbols())
require.Equal(t, uint32(2), s.Symbolize("__name__"))
require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())
require.Equal(t, uint32(3), s.Symbolize("foo"))
require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())
s.Reset()
require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
require.Equal(t, uint32(0), s.Symbolize(""))
require.Equal(t, uint32(1), s.Symbolize("__name__"))
require.Equal(t, []string{"", "__name__"}, s.Symbols())
require.Equal(t, uint32(2), s.Symbolize("abc"))
require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())
ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
encoded := s.SymbolizeLabels(ls, nil)
require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
b := labels.NewScratchBuilder(len(encoded))
decoded := desymbolizeLabels(&b, encoded, s.Symbols())
require.Equal(t, ls, decoded)
// Different buf.
ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234")
encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5})
require.Equal(t, []uint32{1, 3, 6, 5}, encoded)
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,260 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2
syntax = "proto3";
package io.prometheus.write.v2;
option go_package = "writev2";
import "gogoproto/gogo.proto";
// Request represents a request to write the given timeseries to a remote destination.
// This message was introduced in the Remote Write 2.0 specification:
// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
//
// The canonical Content-Type request header value for this message is
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
//
// NOTE: gogoproto options might change in future for this file, they
// are not part of the spec proto (they only modify the generated Go code, not
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
message Request {
// Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message
// for the deterministic interop between those two, see types_test.go for details.
// Generally it's not needed, because Receivers must use the Content-Type header, but we want to
// be sympathetic to adopters with mistaken implementations and have a deterministic error (empty
// message if you use the wrong proto schema).
reserved 1 to 3;
// symbols contains a de-duplicated array of string elements used for various
// items in a Request message, like labels and metadata items. For the sender's convenience
// around empty values for optional fields like unit_ref, the symbols array MUST
// start with an empty string.
//
// To decode each of the symbolized strings, referenced by the "ref(s)" suffix, you
// need to look up the actual string by index in the symbols array. The order of
// strings is up to the sender. The receiver should not assume any particular encoding.
repeated string symbols = 4;
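// For example, the symbols array ["", "__name__", "up"] together with a
// timeseries whose labels_refs is [1, 2] encodes the single label
// __name__="up".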
// timeseries represents an array of distinct series with 0 or more samples.
repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false];
}
// TimeSeries represents a single series.
message TimeSeries {
// labels_refs is a list of label name-value pair references, encoded
// as indices to the Request.symbols array. This list's length is always
// a multiple of two, and the underlying labels should be sorted lexicographically.
//
// Note that there might be multiple TimeSeries objects in the same
// Requests with the same labels e.g. for different exemplars, metadata
// or created timestamp.
repeated uint32 labels_refs = 1;
// Timeseries messages can either specify samples or (native) histogram samples
// (histogram field), but not both. For a typical sender (real-time metric
// streaming), in healthy cases, there will be only one sample or histogram.
//
// Samples and histograms are sorted by timestamp (older first).
repeated Sample samples = 2 [(gogoproto.nullable) = false];
repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
// exemplars represents an optional set of exemplars attached to this series' samples.
repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
// metadata represents the metadata associated with the given series' samples.
Metadata metadata = 5 [(gogoproto.nullable) = false];
// created_timestamp represents an optional created timestamp associated with
// this series' samples in ms format, typically for counter or histogram type
// metrics. Created timestamp represents the time when the counter started
// counting (sometimes referred to as start timestamp), which can increase
// the accuracy of query results.
//
// Note that some receivers might require this and in return fail to
// ingest such samples within the Request.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
// Zero value means value not set. If you need to use exactly zero value for
// the timestamp, use 1 millisecond before or after.
int64 created_timestamp = 6;
}
// Exemplar is additional information attached to some series' samples.
// It is typically used to attach an example trace or request ID associated with
// the metric changes.
message Exemplar {
// labels_refs is an optional list of label name-value pair references, encoded
// as indices to the Request.symbols array. This list's length is always
// a multiple of 2, and the underlying labels should be sorted lexicographically.
// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
repeated uint32 labels_refs = 1;
// value represents an exact example value. This can be useful when the exemplar
// is attached to a histogram, which only gives an estimated value through buckets.
double value = 2;
// timestamp represents an optional timestamp of the sample in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
//
// Note that the "optional" keyword is omitted due to
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
// Zero value means value not set. If you need to use exactly zero value for
// the timestamp, use 1 millisecond before or after.
int64 timestamp = 3;
}
// Sample represents series sample.
message Sample {
// value of the sample.
double value = 1;
// timestamp represents timestamp of the sample in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
int64 timestamp = 2;
}
// Metadata represents the metadata associated with the given series' samples.
message Metadata {
enum MetricType {
METRIC_TYPE_UNSPECIFIED = 0;
METRIC_TYPE_COUNTER = 1;
METRIC_TYPE_GAUGE = 2;
METRIC_TYPE_HISTOGRAM = 3;
METRIC_TYPE_GAUGEHISTOGRAM = 4;
METRIC_TYPE_SUMMARY = 5;
METRIC_TYPE_INFO = 6;
METRIC_TYPE_STATESET = 7;
}
MetricType type = 1;
// help_ref is a reference to the Request.symbols array representing help
// text for the metric. Help is optional; in that case, the reference should
// point to an empty string.
uint32 help_ref = 3;
// unit_ref is a reference to the Request.symbols array representing a unit
// for the metric. Unit is optional; in that case, the reference should
// point to an empty string.
uint32 unit_ref = 4;
}
// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both the usual
// integer histogram and a float histogram.
message Histogram {
enum ResetHint {
RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset.
RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram.
RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen.
}
oneof count { // Count of observations in the histogram.
uint64 count_int = 1;
double count_float = 2;
}
double sum = 3; // Sum of observations in the histogram.
// The schema defines the bucket schema. Currently, valid numbers
// are -53 and numbers in the range -4 <= n <= 8. More valid numbers might be
// added in the future for new bucketing layouts.
//
// A schema equal to -53 means custom buckets. See the
// custom_values field description for more details.
//
// Values between -4 and 8 represent a base-2 bucket schema, where 1
// is a bucket boundary in each case, and then each power of two is
// divided into 2^n (n is the schema value) logarithmic buckets. Or, in other words,
// each bucket boundary is the previous boundary times 2^(2^-n).
sint32 schema = 4;
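// For instance, schema 0 yields the power-of-two boundaries ..., 0.5, 1, 2, 4, ...,
// while schema 3 divides each power of two into 2^3 = 8 logarithmic buckets, so
// consecutive boundaries grow by a factor of 2^(1/8) ≈ 1.0905.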
double zero_threshold = 5; // Breadth of the zero bucket.
oneof zero_count { // Count in zero bucket.
uint64 zero_count_int = 6;
double zero_count_float = 7;
}
// Negative Buckets.
repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
// Use either "negative_deltas" or "negative_counts", the former for
// regular histograms with integer counts, the latter for
// float histograms.
repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
repeated double negative_counts = 10; // Absolute count of each bucket.
// Positive Buckets.
//
// In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
// * The span offset+length points to an index of the custom_values array,
// or to +Inf if pointing to the length of the array.
// * The counts and deltas have the same meaning as for exponential histograms.
repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
// Use either "positive_deltas" or "positive_counts", the former for
// regular histograms with integer counts, the latter for
// float histograms.
repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
repeated double positive_counts = 13; // Absolute count of each bucket.
ResetHint reset_hint = 14;
// timestamp represents timestamp of the sample in ms.
//
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
// for conversion from/to time.Time to Prometheus timestamp.
int64 timestamp = 15;
// custom_values is an additional field used by non-exponential bucketing layouts.
//
// For custom buckets (-53 schema value) custom_values specify monotonically
// increasing upper inclusive boundaries for the bucket counts with arbitrary
// widths for this histogram. In other words, custom_values represents custom,
// explicit bucketing that could have been converted from the classic histograms.
//
// Those bounds are then referenced by spans in positive_spans with corresponding positive
// counts or deltas (refer to positive_spans for more details). This way we can
// encode sparse histograms with custom bucketing (many buckets are often
// not used).
//
// Note that for custom bounds, even negative observations are placed in the positive
// counts to simplify the implementation and avoid ambiguity of where to place
// an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
// the zero bucket are unused if the schema indicates custom bucketing.
//
// For each upper boundary the previous boundary represents the lower exclusive
// boundary for that bucket. The first element is the upper inclusive boundary
// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
// This is similar to "le" label semantics on classic histograms. You may add a
// bucket with an upper bound of 0 to make sure that you really have no negative
// observations; in practice, though, native histogram rendering treats the cases
// with and without a first upper boundary of 0 (and no negative counts) as identical.
//
// The last element is not only the upper inclusive bound of the last regular
// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
repeated double custom_values = 16;
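// As a worked illustration: custom_values [0.1, 0.5, 1.0] describes the buckets
// (-Inf, 0.1], (0.1, 0.5] and (0.5, 1.0], plus the implicit (1.0, +Inf] bucket;
// a positive span {offset: 0, length: 4} with four counts (or deltas) would then
// populate all four of those buckets.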
}
// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
message BucketSpan {
sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
uint32 length = 2; // Length of consecutive buckets.
}

View file

@@ -0,0 +1,97 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package writev2
import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/prompb"
)
func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) {
expectedV1Empty := &prompb.WriteRequest{}
for _, tc := range []struct{ incoming *Request }{
{
incoming: &Request{}, // Technically wrong, should be at least empty string in symbol.
},
{
incoming: &Request{
Symbols: []string{""},
}, // NOTE: Without reserved fields, failed with "corrupted" ghost TimeSeries element.
},
{
incoming: &Request{
Symbols: []string{"", "__name__", "metric1"},
Timeseries: []TimeSeries{
{LabelsRefs: []uint32{1, 2}},
{Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}},
}, // NOTE: Without reserved fields, proto: illegal wireType 7
},
},
} {
t.Run("", func(t *testing.T) {
in, err := proto.Marshal(tc.incoming)
require.NoError(t, err)
// Test accidental unmarshal of v2 payload with v1 proto.
out := &prompb.WriteRequest{}
require.NoError(t, proto.Unmarshal(in, out))
// Drop unknowns, we expect them when incoming payload had some fields.
// This field & method will be likely gone after gogo removal.
out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
require.Equal(t, expectedV1Empty, out)
})
}
}
func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) {
expectedV2Empty := &Request{}
for _, tc := range []struct{ incoming *prompb.WriteRequest }{
{
incoming: &prompb.WriteRequest{},
},
{
incoming: &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}},
Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}},
},
},
},
// NOTE: Without reserved fields, results in corrupted v2.Request.Symbols.
},
} {
t.Run("", func(t *testing.T) {
in, err := proto.Marshal(tc.incoming)
require.NoError(t, err)
// Test accidental unmarshal of v1 payload with v2 proto.
out := &Request{}
require.NoError(t, proto.Unmarshal(in, out))
// Drop unknowns, we expect them when incoming payload had some fields.
// This field & method will be likely gone after gogo removal.
out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
require.Equal(t, expectedV2Empty, out)
})
}
}

View file

@@ -0,0 +1,298 @@
// Copyright 2024 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rwcommon
import (
"testing"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)
func TestToLabels(t *testing.T) {
expected := labels.FromStrings("__name__", "metric1", "foo", "bar")
t.Run("v1", func(t *testing.T) {
ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}}
b := labels.NewScratchBuilder(2)
require.Equal(t, expected, ts.ToLabels(&b, nil))
require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil))
require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels))
})
t.Run("v2", func(t *testing.T) {
v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"}
ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}}
b := labels.NewScratchBuilder(2)
require.Equal(t, expected, ts.ToLabels(&b, v2Symbols))
// No need for FromLabels in our prod code, as we use the symbol table for that.
})
}
func TestFromMetadataType(t *testing.T) {
for _, tc := range []struct {
desc string
input model.MetricType
expectedV1 prompb.MetricMetadata_MetricType
expectedV2 writev2.Metadata_MetricType
}{
{
desc: "with a single-word metric",
input: model.MetricTypeCounter,
expectedV1: prompb.MetricMetadata_COUNTER,
expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER,
},
{
desc: "with a two-word metric",
input: model.MetricTypeStateset,
expectedV1: prompb.MetricMetadata_STATESET,
expectedV2: writev2.Metadata_METRIC_TYPE_STATESET,
},
{
desc: "with an unknown metric",
input: "not-known",
expectedV1: prompb.MetricMetadata_UNKNOWN,
expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED,
},
} {
t.Run(tc.desc, func(t *testing.T) {
t.Run("v1", func(t *testing.T) {
require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input))
})
t.Run("v2", func(t *testing.T) {
require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input))
})
})
}
}
func TestToMetadata(t *testing.T) {
sym := writev2.NewSymbolTable()
for _, tc := range []struct {
input writev2.Metadata
expected metadata.Metadata
}{
{
input: writev2.Metadata{},
expected: metadata.Metadata{
Type: model.MetricTypeUnknown,
},
},
{
input: writev2.Metadata{
Type: 12414, // Unknown.
},
expected: metadata.Metadata{
Type: model.MetricTypeUnknown,
},
},
{
input: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER,
HelpRef: sym.Symbolize("help1"),
UnitRef: sym.Symbolize("unit1"),
},
expected: metadata.Metadata{
Type: model.MetricTypeCounter,
Help: "help1",
Unit: "unit1",
},
},
{
input: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_STATESET,
HelpRef: sym.Symbolize("help2"),
},
expected: metadata.Metadata{
Type: model.MetricTypeStateset,
Help: "help2",
},
},
} {
t.Run("", func(t *testing.T) {
ts := writev2.TimeSeries{Metadata: tc.input}
require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols()))
})
}
}
func TestToHistogram_Empty(t *testing.T) {
t.Run("v1", func(t *testing.T) {
require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
})
t.Run("v2", func(t *testing.T) {
require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
})
}
// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testIntHistogram() histogram.Histogram {
return histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Schema: 1,
Count: 19,
Sum: 2.7,
ZeroThreshold: 1e-128,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 5},
{Offset: 1, Length: 0},
{Offset: 0, Length: 1},
},
NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
CustomValues: []float64{21421, 523},
}
}
// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testFloatHistogram() histogram.FloatHistogram {
return histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 1,
Count: 19,
Sum: 2.7,
ZeroThreshold: 1e-128,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 5},
{Offset: 1, Length: 0},
{Offset: 0, Length: 1},
},
NegativeBuckets: []float64{1, 3, 1, 2, 1, 1},
CustomValues: []float64{21421, 523},
}
}
func TestFromIntToFloatOrIntHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
// v1 does not support NHCB (native histograms with custom buckets).
testIntHistWithoutNHCB := testIntHistogram()
testIntHistWithoutNHCB.CustomValues = nil
testFloatHistWithoutNHCB := testFloatHistogram()
testFloatHistWithoutNHCB.CustomValues = nil
h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram())
require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testIntHist := testIntHistogram()
testFloatHist := testFloatHistogram()
h := writev2.FromIntHistogram(123, &testIntHist)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHist, *h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
}
func TestFromFloatToFloatHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
// v1 does not support NHCB.
testFloatHistWithoutNHCB := testFloatHistogram()
testFloatHistWithoutNHCB.CustomValues = nil
h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())
require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testFloatHist := testFloatHistogram()
h := writev2.FromFloatHistogram(123, &testFloatHist)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
}
func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) {
for _, tc := range []struct {
input histogram.CounterResetHint
expectedV1 prompb.Histogram_ResetHint
expectedV2 writev2.Histogram_ResetHint
}{
{
input: histogram.UnknownCounterReset,
expectedV1: prompb.Histogram_UNKNOWN,
expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED,
},
{
input: histogram.CounterReset,
expectedV1: prompb.Histogram_YES,
expectedV2: writev2.Histogram_RESET_HINT_YES,
},
{
input: histogram.NotCounterReset,
expectedV1: prompb.Histogram_NO,
expectedV2: writev2.Histogram_RESET_HINT_NO,
},
{
input: histogram.GaugeType,
expectedV1: prompb.Histogram_GAUGE,
expectedV2: writev2.Histogram_RESET_HINT_GAUGE,
},
} {
t.Run("", func(t *testing.T) {
t.Run("v1", func(t *testing.T) {
h := testIntHistogram()
h.CounterResetHint = tc.input
got := prompb.FromIntHistogram(1337, &h)
require.Equal(t, tc.expectedV1, got.GetResetHint())
fh := testFloatHistogram()
fh.CounterResetHint = tc.input
got2 := prompb.FromFloatHistogram(1337, &fh)
require.Equal(t, tc.expectedV1, got2.GetResetHint())
})
t.Run("v2", func(t *testing.T) {
h := testIntHistogram()
h.CounterResetHint = tc.input
got := writev2.FromIntHistogram(1337, &h)
require.Equal(t, tc.expectedV2, got.GetResetHint())
fh := testFloatHistogram()
fh.CounterResetHint = tc.input
got2 := writev2.FromFloatHistogram(1337, &fh)
require.Equal(t, tc.expectedV2, got2.GetResetHint())
})
})
}
}

View file

@@ -187,6 +187,21 @@ func rangeQueryCases() []benchCase {
{
expr: "topk(5, a_X)",
},
{
expr: "limitk(1, a_X)",
},
{
expr: "limitk(5, a_X)",
},
{
expr: "limit_ratio(0.1, a_X)",
},
{
expr: "limit_ratio(0.5, a_X)",
},
{
expr: "limit_ratio(-0.5, a_X)",
},
// Combinations.
{
expr: "rate(a_X[1m]) + rate(b_X[1m])",

View file

@@ -1318,7 +1318,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
index, ok := groupToResultIndex[groupingKey]
// Add a new group if it doesn't exist.
if !ok {
if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK {
if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK && aggExpr.Op != parser.LIMITK && aggExpr.Op != parser.LIMIT_RATIO {
m := generateGroupingLabels(enh, series.Metric, aggExpr.Without, sortedGrouping)
result = append(result, Series{Metric: m})
}
@@ -1331,9 +1331,10 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
groups := make([]groupedAggregation, groupCount)
var k int
var ratio float64
var seriess map[uint64]Series
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK:
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
if !convertibleToInt64(param) {
ev.errorf("Scalar value %v overflows int64", param)
}
@@ -1345,6 +1346,23 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
return nil, warnings
}
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
case parser.LIMIT_RATIO:
if math.IsNaN(param) {
ev.errorf("Ratio value %v is NaN", param)
}
switch {
case param == 0:
return nil, warnings
case param < -1.0:
ratio = -1.0
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
case param > 1.0:
ratio = 1.0
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
default:
ratio = param
}
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
case parser.QUANTILE:
if math.IsNaN(param) || param < 0 || param > 1 {
warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange()))
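For reference, the ratio clamping introduced above can be exercised in isolation. Below is a minimal sketch of the same rules (clampRatio is a hypothetical helper name, not part of this change; the evaluator additionally returns an empty result for a ratio of exactly 0):

package main

import (
	"fmt"
	"math"
)

// clampRatio mirrors the evaluator's handling of the limit_ratio
// parameter: NaN is an error, and values outside [-1, 1] are clamped
// with a flag so the caller can emit an InvalidRatioWarning.
func clampRatio(param float64) (ratio float64, warn bool, err error) {
	switch {
	case math.IsNaN(param):
		return 0, false, fmt.Errorf("ratio value %v is NaN", param)
	case param < -1.0:
		return -1.0, true, nil
	case param > 1.0:
		return 1.0, true, nil
	default:
		return param, false, nil
	}
}

func main() {
	for _, p := range []float64{0.5, 1.1, -1.1, math.NaN()} {
		fmt.Println(clampRatio(p))
	}
}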
@ -1362,11 +1380,12 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
enh.Ts = ts
var ws annotations.Annotations
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK:
result, ws = ev.aggregationK(aggExpr, k, inputMatrix, seriesToResult, groups, enh, seriess)
case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO:
result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess)
// If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp {
return result, ws
warnings.Merge(ws)
return result, warnings
}
default:
ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh)
@ -1381,7 +1400,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
// Assemble the output matrix. By the time we get here we know we don't have too many samples.
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK:
case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO:
result = make(Matrix, 0, len(seriess))
for _, ss := range seriess {
result = append(result, ss)
@ -2754,14 +2773,15 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
}
type groupedAggregation struct {
seen bool // Was this output groups seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
floatValue float64
histogramValue *histogram.FloatHistogram
floatMean float64 // Mean, or "compensating value" for Kahan summation.
groupCount int
heap vectorByValueHeap
seen bool // Was this output group seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated.
floatValue float64
histogramValue *histogram.FloatHistogram
floatMean float64 // Mean, or "compensating value" for Kahan summation.
groupCount int
groupAggrComplete bool // Used by LIMITK to short-cut the series loop once we've collected k elements in every group
heap vectorByValueHeap
}
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
@ -2958,19 +2978,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
return annos
}
// aggregationK evaluates topk or bottomk at one timestep on inputMatrix.
// aggregationK evaluates topk, bottomk, limitk, or limit_ratio at one timestep on inputMatrix.
// Output that has the same labels as the input, but just k of them per group.
// seriesToResult maps inputMatrix indexes to groups indexes.
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk.
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio.
// For a range query, aggregates output in the seriess map.
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
op := e.Op
var s Sample
var annos annotations.Annotations
// Used to short-cut the loop for LIMITK once k elements have been collected for every group
groupsRemaining := len(groups)
for i := range groups {
groups[i].seen = false
}
seriesLoop:
for si := range inputMatrix {
f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si])
if !ok {
@ -2981,11 +3004,23 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma
group := &groups[seriesToResult[si]]
// Initialize this group if it's the first time we've seen it.
if !group.seen {
*group = groupedAggregation{
seen: true,
heap: make(vectorByValueHeap, 1, k),
// LIMIT_RATIO is a special case, as we may not add this very sample to the heap,
// while we also don't know the final size of it.
if op == parser.LIMIT_RATIO {
*group = groupedAggregation{
seen: true,
heap: make(vectorByValueHeap, 0),
}
if ratiosampler.AddRatioSample(r, &s) {
heap.Push(&group.heap, &s)
}
} else {
*group = groupedAggregation{
seen: true,
heap: make(vectorByValueHeap, 1, k),
}
group.heap[0] = s
}
group.heap[0] = s
continue
}
@ -3016,6 +3051,26 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma
}
}
case parser.LIMITK:
if len(group.heap) < k {
heap.Push(&group.heap, &s)
}
// LIMITK optimization: break early once we've added k elements to _every_ group,
// especially useful for high-cardinality metrics where the user is exploring labels via e.g.
// limitk(10, my_metric)
if !group.groupAggrComplete && len(group.heap) == k {
group.groupAggrComplete = true
groupsRemaining--
if groupsRemaining == 0 {
break seriesLoop
}
}
case parser.LIMIT_RATIO:
if ratiosampler.AddRatioSample(r, &s) {
heap.Push(&group.heap, &s)
}
default:
panic(fmt.Errorf("expected aggregation operator but got %q", op))
}
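The groupsRemaining bookkeeping above can be hard to follow inline; here is a toy sketch of the same short-circuit idea under invented data (the group names and series list are made up for illustration):

package main

import "fmt"

func main() {
	const k = 2
	counts := map[string]int{}                  // samples collected per group
	series := []string{"a", "b", "a", "b", "a"} // the last series is never visited
	remaining := 2                              // groups still below k elements

	for i, g := range series {
		counts[g]++
		if counts[g] == k {
			remaining--
			if remaining == 0 {
				fmt.Printf("short-circuit after %d of %d series\n", i+1, len(series))
				break
			}
		}
	}
}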
@ -3065,6 +3120,11 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma
for _, v := range aggr.heap {
add(v.Metric, v.F)
}
case parser.LIMITK, parser.LIMIT_RATIO:
for _, v := range aggr.heap {
add(v.Metric, v.F)
}
}
}
@ -3390,6 +3450,12 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) {
// required for correctness.
func detectHistogramStatsDecoding(expr parser.Expr) {
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
if n, ok := node.(*parser.BinaryExpr); ok {
detectHistogramStatsDecoding(n.LHS)
detectHistogramStatsDecoding(n.RHS)
return fmt.Errorf("stop")
}
n, ok := (node).(*parser.VectorSelector)
if !ok {
return nil
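The early return above relies on parser.Inspect aborting the walk when the visitor returns an error; each side of the binary expression is then visited by the explicit recursive calls. A small sketch of that traversal pattern (the query string is made up):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	expr, err := parser.ParseExpr(`sum(rate(foo[5m])) / sum(rate(bar[5m]))`)
	if err != nil {
		panic(err)
	}
	parser.Inspect(expr, func(node parser.Node, _ []parser.Node) error {
		if _, ok := node.(*parser.BinaryExpr); ok {
			// Returning a non-nil error stops Inspect from descending;
			// the LHS and RHS are then handled by separate recursions.
			fmt.Println("found binary expression, stopping this walk")
			return fmt.Errorf("stop")
		}
		return nil
	})
}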
@ -3419,6 +3485,56 @@ func makeInt64Pointer(val int64) *int64 {
return valp
}
// Add RatioSampler interface to allow unit-testing (previously: Randomizer).
type RatioSampler interface {
// sampleOffset returns this sample's "offset" within [0.0, 1.0]
sampleOffset(ts int64, sample *Sample) float64
AddRatioSample(r float64, sample *Sample) bool
}
// Use Hash(labels.String()) / maxUint64 as a "deterministic"
// value in [0.0, 1.0].
type HashRatioSampler struct{}
var ratiosampler RatioSampler = NewHashRatioSampler()
func NewHashRatioSampler() *HashRatioSampler {
return &HashRatioSampler{}
}
func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 {
const (
float64MaxUint64 = float64(math.MaxUint64)
)
return float64(sample.Metric.Hash()) / float64MaxUint64
}
func (s *HashRatioSampler) AddRatioSample(ratioLimit float64, sample *Sample) bool {
// If ratioLimit >= 0: add sample if sampleOffset is less than ratioLimit
//
// 0.0 ratioLimit 1.0
// [---------|--------------------------]
// [#########...........................]
//
// e.g.:
// sampleOffset==0.3 && ratioLimit==0.4
// 0.3 < 0.4 ? --> add sample
//
// Else if ratioLimit < 0: add sample if sampleOffset falls in the "complement" of the ratioLimit >= 0 case
// (loosely similar to negative array indexing in other programming languages)
//
// 0.0 1+ratioLimit 1.0
// [---------|--------------------------]
// [.........###########################]
//
// e.g.:
// sampleOffset==0.3 && ratioLimit==-0.6
// 0.3 >= 0.4 ? --> don't add sample
sampleOffset := s.sampleOffset(sample.T, sample)
return (ratioLimit >= 0 && sampleOffset < ratioLimit) ||
(ratioLimit < 0 && sampleOffset >= (1.0+ratioLimit))
}
type histogramStatsSeries struct {
storage.Series
}
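To illustrate the sampling contract, the sketch below partitions series the same way AddRatioSample does. It substitutes FNV-1a for the label hash the real sampler uses (an assumption made purely to keep the example self-contained); the complement property is the same:

package main

import (
	"fmt"
	"hash/fnv"
	"math"
)

// offset maps a series identity to a deterministic value in [0.0, 1.0),
// standing in for float64(sample.Metric.Hash()) / float64(math.MaxUint64).
func offset(series string) float64 {
	h := fnv.New64a()
	h.Write([]byte(series))
	return float64(h.Sum64()) / float64(math.MaxUint64)
}

// keep reproduces the AddRatioSample decision: a positive ratio keeps the
// lower slice of the offset space, a negative ratio keeps the complement.
func keep(r float64, series string) bool {
	o := offset(series)
	return (r >= 0 && o < r) || (r < 0 && o >= 1.0+r)
}

func main() {
	for _, s := range []string{`up{instance="a"}`, `up{instance="b"}`, `up{instance="c"}`} {
		// Each series is kept by exactly one of limit_ratio(0.3, ...)
		// and limit_ratio(-0.7, ...): the two selections are complements.
		fmt.Println(s, keep(0.3, s), keep(-0.7, s))
	}
}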

View file

@ -49,6 +49,8 @@ const (
)
func TestMain(m *testing.M) {
// Enable experimental functions testing
parser.EnableExperimentalFunctions = true
goleak.VerifyTestMain(m)
}
@ -236,11 +238,11 @@ func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*lab
return errSeriesSet{err: q.err}
}
func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (*errQuerier) Close() error { return nil }

View file

@ -993,10 +993,14 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
t := sum + inc
switch {
case math.IsInf(t, 0):
c = 0
// Using Neumaier improvement, swap if next term larger than sum.
if math.Abs(sum) >= math.Abs(inc) {
case math.Abs(sum) >= math.Abs(inc):
c += (sum - t) + inc
} else {
default:
c += (inc - t) + sum
}
return t, c
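The switch above keeps the Neumaier compensation but zeroes it once the running sum overflows to infinity, so Inf and NaN results propagate cleanly. A quick self-contained check of the patched helper:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc is copied from the patch: once the intermediate sum is
// infinite the compensation is reset, otherwise the Neumaier variant
// compensates whichever operand is smaller in magnitude.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	// 1 + 1e100 + 1 - 1e100 is exactly 2; naive float64 summation loses both 1s.
	var sum, c, naive float64
	for _, v := range []float64{1, 1e100, 1, -1e100} {
		sum, c = kahanSumInc(v, sum, c)
		naive += v
	}
	fmt.Println(naive, sum+c) // 0 2
}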

View file

@ -0,0 +1,81 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"fmt"
"math"
"testing"
"github.com/stretchr/testify/require"
)
func TestKahanSumInc(t *testing.T) {
testCases := map[string]struct {
first float64
second float64
expected float64
}{
"+Inf + anything = +Inf": {
first: math.Inf(1),
second: 2.0,
expected: math.Inf(1),
},
"-Inf + anything = -Inf": {
first: math.Inf(-1),
second: 2.0,
expected: math.Inf(-1),
},
"+Inf + -Inf = NaN": {
first: math.Inf(1),
second: math.Inf(-1),
expected: math.NaN(),
},
"NaN + anything = NaN": {
first: math.NaN(),
second: 2,
expected: math.NaN(),
},
"NaN + Inf = NaN": {
first: math.NaN(),
second: math.Inf(1),
expected: math.NaN(),
},
"NaN + -Inf = NaN": {
first: math.NaN(),
second: math.Inf(-1),
expected: math.NaN(),
},
}
runTest := func(t *testing.T, a, b, expected float64) {
t.Run(fmt.Sprintf("%v + %v = %v", a, b, expected), func(t *testing.T) {
sum, c := kahanSumInc(b, a, 0)
result := sum + c
if math.IsNaN(expected) {
require.Truef(t, math.IsNaN(result), "expected result to be NaN, but got %v (from %v + %v)", result, sum, c)
} else {
require.Equalf(t, expected, result, "expected result to be %v, but got %v (from %v + %v)", expected, result, sum, c)
}
})
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
runTest(t, testCase.first, testCase.second, testCase.expected)
runTest(t, testCase.second, testCase.first, testCase.expected)
})
}
}

View file

@ -43,7 +43,6 @@ import (
int int64
uint uint64
float float64
duration time.Duration
}
@ -126,6 +125,8 @@ STDDEV
STDVAR
SUM
TOPK
LIMITK
LIMIT_RATIO
%token aggregatorsEnd
// Keywords.
@ -174,8 +175,7 @@ START_METRIC_SELECTOR
%type <int> int
%type <uint> uint
%type <float> number series_value signed_number signed_or_unsigned_number
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
%type <duration> duration maybe_duration
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
%start start
@ -216,7 +216,7 @@ expr :
| binary_expr
| function_call
| matrix_selector
| number_literal
| number_duration_literal
| offset_expr
| paren_expr
| string_literal
@ -413,18 +413,22 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN
* Offset modifiers.
*/
offset_expr: expr OFFSET duration
offset_expr: expr OFFSET number_duration_literal
{
yylex.(*parser).addOffset($1, $3)
$$ = $1
numLit, _ := $3.(*NumberLiteral)
dur := time.Duration(numLit.Val * 1000) * time.Millisecond
yylex.(*parser).addOffset($1, dur)
$$ = $1
}
| expr OFFSET SUB duration
| expr OFFSET SUB number_duration_literal
{
yylex.(*parser).addOffset($1, -$4)
$$ = $1
numLit, _ := $4.(*NumberLiteral)
dur := time.Duration(numLit.Val * 1000) * time.Millisecond
yylex.(*parser).addOffset($1, -dur)
$$ = $1
}
| expr OFFSET error
{ yylex.(*parser).unexpected("offset", "duration"); $$ = $1 }
{ yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
;
/*
* @ modifiers.
@ -450,7 +454,7 @@ at_modifier_preprocessors: START | END;
* Subquery and range selectors.
*/
matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
{
var errMsg string
vs, ok := $1.(*VectorSelector)
@ -467,32 +471,44 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
yylex.(*parser).addParseErrf(errRange, errMsg)
}
numLit, _ := $3.(*NumberLiteral)
$$ = &MatrixSelector{
VectorSelector: $1.(Expr),
Range: $3,
Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
EndPos: yylex.(*parser).lastClosing,
}
}
;
subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
{
numLitRange, _ := $3.(*NumberLiteral)
numLitStep, _ := $5.(*NumberLiteral)
$$ = &SubqueryExpr{
Expr: $1.(Expr),
Range: $3,
Step: $5,
Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond,
EndPos: $6.Pos + 1,
}
}
| expr LEFT_BRACKET duration COLON duration error
| expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
{
numLitRange, _ := $3.(*NumberLiteral)
$$ = &SubqueryExpr{
Expr: $1.(Expr),
Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
Step: 0,
EndPos: $5.Pos + 1,
}
}
| expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
{ yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
| expr LEFT_BRACKET duration COLON error
{ yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 }
| expr LEFT_BRACKET duration error
| expr LEFT_BRACKET number_duration_literal COLON error
{ yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
| expr LEFT_BRACKET number_duration_literal error
{ yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
| expr LEFT_BRACKET error
{ yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 }
{ yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
;
/*
@ -609,7 +625,7 @@ metric : metric_identifier label_set
;
metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END;
metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO;
label_set : LEFT_BRACE label_set_list RIGHT_BRACE
{ $$ = labels.New($2...) }
@ -851,10 +867,10 @@ bucket_set_list : bucket_set_list SPACE number
* Keyword lists.
*/
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
// Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2;
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO;
unary_op : ADD | SUB;
@ -864,16 +880,43 @@ match_op : EQL | NEQ | EQL_REGEX | NEQ_REGEX ;
* Literals.
*/
number_literal : NUMBER
number_duration_literal : NUMBER
{
$$ = &NumberLiteral{
$$ = &NumberLiteral{
Val: yylex.(*parser).number($1.Val),
PosRange: $1.PositionRange(),
}
}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = &NumberLiteral{
Val: dur.Seconds(),
PosRange: $1.PositionRange(),
}
}
;
number : NUMBER { $$ = yylex.(*parser).number($1.Val) } ;
number : NUMBER
{
$$ = yylex.(*parser).number($1.Val)
}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = dur.Seconds()
}
;
signed_number : ADD number { $$ = $2 }
| SUB number { $$ = -$2 }
@ -895,17 +938,6 @@ int : SUB uint { $$ = -int64($2) }
| uint { $$ = int64($1) }
;
duration : DURATION
{
var err error
$$, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
}
;
string_literal : STRING
{
$$ = &StringLiteral{
@ -929,11 +961,6 @@ string_identifier : STRING
* Wrappers for optional arguments.
*/
maybe_duration : /* empty */
{$$ = 0}
| duration
;
maybe_grouping_labels: /* empty */ { $$ = nil }
| grouping_labels
;
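Across the grammar changes above, bare numbers are now accepted wherever a duration used to be required; the NumberLiteral holds seconds, and each use site converts with the same expression. A minimal illustration of that conversion (note the truncation to millisecond precision):

package main

import (
	"fmt"
	"time"
)

func main() {
	// As in the grammar actions: values are seconds, converted via
	// time.Duration(val*1000) * time.Millisecond.
	for _, val := range []float64{4.018, 300, 0.0005} {
		dur := time.Duration(val*1000) * time.Millisecond
		fmt.Println(val, "->", dur) // sub-millisecond values truncate to 0s
	}
}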

File diff suppressed because it is too large.

View file

@ -65,7 +65,7 @@ func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggre
// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
// Returns false otherwise.
func (i ItemType) IsAggregatorWithParam() bool {
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE || i == LIMITK || i == LIMIT_RATIO
}
// IsKeyword returns true if the Item corresponds to a keyword.
@ -118,6 +118,8 @@ var key = map[string]ItemType{
"bottomk": BOTTOMK,
"count_values": COUNT_VALUES,
"quantile": QUANTILE,
"limitk": LIMITK,
"limit_ratio": LIMIT_RATIO,
// Keywords.
"offset": OFFSET,
@ -476,7 +478,7 @@ func lexStatements(l *Lexer) stateFn {
skipSpaces(l)
}
l.bracketOpen = true
return lexDuration
return lexNumberOrDuration
case r == ']':
if !l.bracketOpen {
return l.errorf("unexpected right bracket %q", r)
@ -844,18 +846,6 @@ func lexLineComment(l *Lexer) stateFn {
return lexStatements
}
func lexDuration(l *Lexer) stateFn {
if l.scanNumber() {
return l.errorf("missing unit character in duration")
}
if !acceptRemainingDuration(l) {
return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
}
l.backup()
l.emit(DURATION)
return lexStatements
}
// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *Lexer) stateFn {
if !l.scanNumber() {
@ -907,6 +897,7 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
initialPos := l.pos
// Modify the digit pattern if the number is hexadecimal.
digitPattern := "0123456789"
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
@ -978,7 +969,10 @@ func (l *Lexer) scanNumber() bool {
// Handle digits at the end since we already consumed before this loop.
l.acceptRun(digitPattern)
}
// Empty string is not a valid number.
if l.pos == initialPos {
return false
}
// Next thing must not be alphanumeric unless it's the times token
// for series repetitions.
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {

View file

@ -447,6 +447,10 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
desiredArgs := 1
if ret.Op.IsAggregatorWithParam() {
if !EnableExperimentalFunctions && (ret.Op == LIMITK || ret.Op == LIMIT_RATIO) {
p.addParseErrf(ret.PositionRange(), "limitk() and limit_ratio() are experimental and must be enabled with --enable-feature=promql-experimental-functions")
return
}
desiredArgs = 2
ret.Param = arguments[0]
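As a usage note, this gate means the new aggregators only parse once experimental functions are enabled. A small sketch against the parser package (the exact error text is whatever the gate above produces):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// With the flag off, limitk()/limit_ratio() are rejected at parse time.
	if _, err := parser.ParseExpr(`limitk(5, up)`); err != nil {
		fmt.Println("disabled:", err)
	}

	// Equivalent to running with --enable-feature=promql-experimental-functions.
	parser.EnableExperimentalFunctions = true
	expr, err := parser.ParseExpr(`limitk(5, up)`)
	fmt.Println("enabled:", expr, err)
}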
@ -672,7 +676,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
p.addParseErrf(n.PositionRange(), "aggregation operator expected in aggregation expression but got %q", n.Op)
}
p.expectType(n.Expr, ValueTypeVector, "aggregation expression")
if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE {
if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE || n.Op == LIMITK || n.Op == LIMIT_RATIO {
p.expectType(n.Param, ValueTypeScalar, "aggregation parameter")
}
if n.Op == COUNT_VALUES {

View file

@ -2133,6 +2133,115 @@ var testExpr = []struct {
EndPos: 25,
},
},
{
input: `test{a="b"}[5m] OFFSET 3600`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
OriginalOffset: 1 * time.Hour,
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, "a", "b"),
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 27,
},
},
{
input: `foo[3ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 3 * time.Millisecond,
EndPos: 16,
},
},
{
input: `foo[4s180ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 20,
},
},
{
input: `foo[4.18] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 17,
},
},
{
input: `foo[4s18ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 19,
},
},
{
input: `foo[4.018] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 18,
},
},
{
input: `test{a="b"}[5y] @ 1603774699`,
expected: &MatrixSelector{
@ -2152,15 +2261,50 @@ var testExpr = []struct {
EndPos: 28,
},
},
{
input: "test[5]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 5 * time.Second,
EndPos: 7,
},
},
{
input: `some_metric[5m] @ 1m`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "some_metric",
Timestamp: makeInt64Pointer(60000),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 20,
},
},
{
input: `foo[5mm]`,
fail: true,
errMsg: "bad duration syntax: \"5mm\"",
errMsg: "bad number or duration syntax: \"5mm\"",
},
{
input: `foo[5m1]`,
fail: true,
errMsg: "bad duration syntax: \"5m1\"",
errMsg: "bad number or duration syntax: \"5m1\"",
},
{
input: `foo[5m:1m1]`,
@ -2194,17 +2338,12 @@ var testExpr = []struct {
{
input: `foo[]`,
fail: true,
errMsg: "missing unit character in duration",
errMsg: "bad number or duration syntax: \"\"",
},
{
input: `foo[1]`,
input: `foo[-1]`,
fail: true,
errMsg: "missing unit character in duration",
},
{
input: `some_metric[5m] OFFSET 1`,
fail: true,
errMsg: "unexpected number \"1\" in offset, expected duration",
errMsg: "bad number or duration syntax: \"\"",
},
{
input: `some_metric[5m] OFFSET 1mm`,
@ -2214,18 +2353,13 @@ var testExpr = []struct {
{
input: `some_metric[5m] OFFSET`,
fail: true,
errMsg: "unexpected end of input in offset, expected duration",
errMsg: "unexpected end of input in offset, expected number or duration",
},
{
input: `some_metric OFFSET 1m[5m]`,
fail: true,
errMsg: "1:22: parse error: no offset modifiers allowed before range",
},
{
input: `some_metric[5m] @ 1m`,
fail: true,
errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp",
},
{
input: `some_metric[5m] @`,
fail: true,
@ -2910,6 +3044,11 @@ var testExpr = []struct {
errMsg: "illegal character U+002E '.' in escape sequence",
},
// Subquery.
{
input: `foo{bar="baz"}[`,
fail: true,
errMsg: `1:16: parse error: bad number or duration syntax: ""`,
},
{
input: `foo{bar="baz"}[10m:6s]`,
expected: &SubqueryExpr{

View file

@ -23,6 +23,7 @@ import (
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/teststorage"
)
@ -45,6 +46,8 @@ func TestConcurrentRangeQueries(t *testing.T) {
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
// Enable experimental functions testing
parser.EnableExperimentalFunctions = true
engine := promql.NewEngine(opts)
const interval = 10000 // 10s interval.

View file

@ -55,6 +55,11 @@ const (
DefaultMaxSamplesPerQuery = 10000
)
type TBRun interface {
testing.TB
Run(string, func(*testing.T)) bool
}
var testStartTime = time.Unix(0, 0).UTC()
// LoadedStorage returns storage with generated data using the provided load statements.
@ -89,7 +94,7 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
}
// RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true

View file

@ -511,10 +511,39 @@ load 10s
data{test="ten",point="b"} 8
data{test="ten",point="c"} 1e+100
data{test="ten",point="d"} -1e100
data{test="pos_inf",group="1",point="a"} Inf
data{test="pos_inf",group="1",point="b"} 2
data{test="pos_inf",group="2",point="a"} 2
data{test="pos_inf",group="2",point="b"} Inf
data{test="neg_inf",group="1",point="a"} -Inf
data{test="neg_inf",group="1",point="b"} 2
data{test="neg_inf",group="2",point="a"} 2
data{test="neg_inf",group="2",point="b"} -Inf
data{test="inf_inf",point="a"} Inf
data{test="inf_inf",point="b"} -Inf
data{test="nan",group="1",point="a"} NaN
data{test="nan",group="1",point="b"} 2
data{test="nan",group="2",point="a"} 2
data{test="nan",group="2",point="b"} NaN
eval instant at 1m sum(data{test="ten"})
{} 10
eval instant at 1m sum by (group) (data{test="pos_inf"})
{group="1"} Inf
{group="2"} Inf
eval instant at 1m sum by (group) (data{test="neg_inf"})
{group="1"} -Inf
{group="2"} -Inf
eval instant at 1m sum(data{test="inf_inf"})
{} NaN
eval instant at 1m sum by (group) (data{test="nan"})
{group="1"} NaN
{group="2"} NaN
clear
# Test that aggregations are deterministic.

View file

@ -10,22 +10,54 @@ eval instant at 10s metric @ 100
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 100s
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 1m40s
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 100 offset 50s
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 100 offset 50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset 50s @ 100
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset 50 @ 100
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50s
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset -50s @ 0
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset -50 @ 0
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s -metric @ 100
{job="1"} -10
{job="2"} -20
@ -48,6 +80,12 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100 offset 50s)
eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100)
{job="1"} 15
eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100 offset 50)
{job="1"} 15
eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100)
{job="1"} 15
# Different timestamps.
eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100
{job="1"} 15
@ -58,6 +96,9 @@ eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metri
eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "")
{job="1"} 165
eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100) + label_replace(sum_over_time(metric{job="2"}[100] @ 100), "job", "1", "", "")
{job="1"} 165
# Subqueries.
# 10*(1+2+...+9) + 10.
@ -72,6 +113,10 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] @ 100 offset 20s)
eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
{job="1"} 288
# 10*(1+2+...+7) + 8.
eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)
{job="1"} 288
# Subquery with different timestamps.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.

View file

@ -10,6 +10,11 @@ eval instant at 50m resets(http_requests[5m])
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[300])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[20m])
{path="/foo"} 1
{path="/bar"} 0
@ -239,10 +244,16 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
{} 76.81818181818181
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h)
{} 76.81818181818181
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 51.36363636363637

View file

@ -73,22 +73,32 @@ eval instant at 50m histogram_count(testhistogram3)
{start="positive"} 110
{start="negative"} 20
# Classic way of accessing the count still works.
eval instant at 50m testhistogram3_count
testhistogram3_count{start="positive"} 110
testhistogram3_count{start="negative"} 20
# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
{start="positive"} 330
{start="negative"} 80
# Test histogram_avg.
# Classic way of accessing the sum still works.
eval instant at 50m testhistogram3_sum
testhistogram3_sum{start="positive"} 330
testhistogram3_sum{start="negative"} 80
# Test histogram_avg. This has no classic equivalent.
eval instant at 50m histogram_avg(testhistogram3)
{start="positive"} 3
{start="negative"} 4
# Test histogram_stddev.
# Test histogram_stddev. This has no classic equivalent.
eval instant at 50m histogram_stddev(testhistogram3)
{start="positive"} 2.8189265757336734
{start="negative"} 4.182715937754936
# Test histogram_stdvar.
# Test histogram_stdvar. This has no classic equivalent.
eval instant at 50m histogram_stdvar(testhistogram3)
{start="positive"} 7.946347039377573
{start="negative"} 17.495112615949154
@ -103,137 +113,282 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m]))
{start="positive"} 0.6363636363636364
{start="negative"} 0
# Test histogram_quantile.
# In the classic histogram, we can access the corresponding bucket (if
# it exists) and divide by the count to get the same result.
eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
{start="positive"} 0.6363636363636364
eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m])
{start="positive"} 0.6363636363636364
# Test histogram_quantile, native and classic.
eval instant at 50m histogram_quantile(0, testhistogram3)
{start="positive"} 0
{start="negative"} -0.25
eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
{start="positive"} 0
{start="negative"} -0.25
eval instant at 50m histogram_quantile(0.25, testhistogram3)
{start="positive"} 0.055
{start="negative"} -0.225
eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
{start="positive"} 0.055
{start="negative"} -0.225
eval instant at 50m histogram_quantile(0.5, testhistogram3)
{start="positive"} 0.125
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
{start="positive"} 0.125
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.75, testhistogram3)
{start="positive"} 0.45
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
{start="positive"} 0.45
{start="negative"} -0.15
eval instant at 50m histogram_quantile(1, testhistogram3)
{start="positive"} 1
{start="negative"} -0.1
eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
{start="positive"} 1
{start="negative"} -0.1
# Quantile too low.
eval_warn instant at 50m histogram_quantile(-0.1, testhistogram)
{start="positive"} -Inf
{start="negative"} -Inf
eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
{start="positive"} -Inf
{start="negative"} -Inf
# Quantile too high.
eval_warn instant at 50m histogram_quantile(1.01, testhistogram)
{start="positive"} +Inf
{start="negative"} +Inf
eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
{start="positive"} +Inf
{start="negative"} +Inf
# Quantile invalid.
eval_warn instant at 50m histogram_quantile(NaN, testhistogram)
{start="positive"} NaN
{start="negative"} NaN
eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
{start="positive"} NaN
{start="negative"} NaN
# Quantile value in lowest bucket.
eval instant at 50m histogram_quantile(0, testhistogram)
{start="positive"} 0
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0, testhistogram_bucket)
{start="positive"} 0
{start="negative"} -0.2
# Quantile value in highest bucket.
eval instant at 50m histogram_quantile(1, testhistogram)
{start="positive"} 1
{start="negative"} 0.3
eval instant at 50m histogram_quantile(1, testhistogram_bucket)
{start="positive"} 1
{start="negative"} 0.3
# Finally some useful quantiles.
eval instant at 50m histogram_quantile(0.2, testhistogram)
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, testhistogram)
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, testhistogram)
{start="positive"} 0.72
{start="negative"} 0.3
eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
{start="positive"} 0.72
{start="negative"} 0.3
# More realistic with rates.
eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m]))
{start="positive"} 0.048
{start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m]))
{start="positive"} 0.15
{start="negative"} -0.15
eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m]))
{start="positive"} 0.72
{start="negative"} 0.3
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m]))
{start="positive"} 0.72
{start="negative"} 0.3
# Want results exactly in the middle of the bucket.
eval instant at 7m histogram_quantile(1./6., testhistogram2)
{} 1
eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket)
{} 1
eval instant at 7m histogram_quantile(0.5, testhistogram2)
{} 3
eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket)
{} 3
eval instant at 7m histogram_quantile(5./6., testhistogram2)
{} 5
eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket)
{} 5
eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m]))
{} 1
eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m]))
{} 1
eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m]))
{} 3
eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m]))
{} 3
eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m]))
{} 5
eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
{} 5
# Aggregated histogram: Everything in one.
# Aggregated histogram: Everything in one. Note how native histograms
# don't require aggregation by le.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])))
{} 0.075
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])))
{} 0.1277777777777778
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m])))
{} 0.075
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.075
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m])))
{} 0.12777777777777778
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le))
{} 0.12777777777777778
# Aggregated histogram: By instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
{instance="ins1"} 0.075
{instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
# Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job))
{job="job1"} 0.14
{job="job2"} 0.1125
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job))
{job="job1"} 0.14
{job="job2"} 0.1125
# Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
@ -241,18 +396,32 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu
{instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667
# All NHCBs summed into one.
eval instant at 50m sum(request_duration_seconds)
{} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}
@ -303,11 +472,13 @@ load_with_nhcb 5m
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m]))
{instance="ins1", job="job1"} NaN
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
# https://github.com/prometheus/prometheus/issues/9910
load_with_nhcb 5m
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})

promql/promqltest/testdata/limit.test (new file, 119 lines)
View file

@ -0,0 +1,119 @@
# Tests for limitk
#
# NB: the many `and http_requests` clauses are there to ensure that the series _are_ indeed
# a subset of the original series.
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
http_requests{job="api-server", instance="2", group="canary"} 0+50x10
http_requests{job="api-server", instance="3", group="canary"} 0+60x10
eval instant at 50m count(limitk by (group) (0, http_requests))
# empty
eval instant at 50m count(limitk by (group) (-1, http_requests))
# empty
# Exercise the k==1 special case (as the sample is added before the main series loop).
eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests)
{} 2
eval instant at 50m count(limitk by (group) (2, http_requests) and http_requests)
{} 4
eval instant at 50m count(limitk(100, http_requests) and http_requests)
{} 6
# limit_ratio
eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests))
# empty
# limitk(2, ...) should always return a 2-count subset of the timeseries (hence the AND'ing)
eval range from 0 to 50m step 5m count(limitk(2, http_requests) and http_requests)
{} 2+0x10
# Tests for limit_ratio
#
# NB: the 0.5-ratio tests below depend on some hashing "luck" (there's also no guarantee
# that total number of series * ratio yields an integer), as selection depends on:
#
# * ratioLimit = [0.0, 1.0]:
# float64(sample.Metric.Hash()) / float64MaxUint64 < Ratio ?
# * ratioLimit = [-1.0, 1.0):
# float64(sample.Metric.Hash()) / float64MaxUint64 >= (1.0 + Ratio) ?
#
# See `AddRatioSample()` in promql/engine.go for more details.
# Half~ish samples: verify we get "near" 3 (of 0.5 * 6)
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) <= bool (3+1)
{} 1+0x10
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) >= bool (3-1)
{} 1+0x10
# All samples
eval range from 0 to 50m step 5m count(limit_ratio(1.0, http_requests) and http_requests)
{} 6+0x10
# All samples
eval range from 0 to 50m step 5m count(limit_ratio(-1.0, http_requests) and http_requests)
{} 6+0x10
# Capped to 1.0 -> all samples
eval_warn range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests)
{} 6+0x10
# Capped to -1.0 -> all samples
eval_warn range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests)
{} 6+0x10
# Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other
# Complement below for [0.2, -0.8]
#
# Complement 1of2: `or` should return all samples
eval range from 0 to 50m step 5m count(limit_ratio(0.2, http_requests) or limit_ratio(-0.8, http_requests))
{} 6+0x10
# Complement 2of2: `and` should return no samples
eval range from 0 to 50m step 5m count(limit_ratio(0.2, http_requests) and limit_ratio(-0.8, http_requests))
# empty
# Complement below for [0.5, -0.5]
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) or limit_ratio(-0.5, http_requests))
{} 6+0x10
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and limit_ratio(-0.5, http_requests))
# empty
# Complement below for [0.8, -0.2]
eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) or limit_ratio(-0.2, http_requests))
{} 6+0x10
eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit_ratio(-0.2, http_requests))
# empty
# Complement below for [some_ratio, 1.0 - some_ratio], some_ratio derived from time(),
# using a small prime number to avoid rounded ratio values, and a small set of them.
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio(1.0 - (time() % 17/17), http_requests))
{} 6+0x10
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio(1.0 - (time() % 17/17), http_requests))
# empty
# Poor man's normality check: ok (the loaded samples follow a nice linearity over labels and time).
# The check below yields 1 (i.e. true).
eval range from 0 to 50m step 5m abs(avg(limit_ratio(0.5, http_requests)) - avg(limit_ratio(-0.5, http_requests))) <= bool stddev(http_requests)
{} 1+0x10

View file

@ -355,10 +355,10 @@ load 10m
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
{} NaN
{} Inf
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
{} NaN
{} Inf
# Apply quantile function to histogram with all positive buckets with zero bucket.
load 10m
@ -715,6 +715,9 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
{} 1
eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
{} 100
clear
# Counter reset only noticeable in a single bucket.

View file

@ -32,6 +32,9 @@ eval instant at 20s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[10s])
{} 1
eval instant at 20s count_over_time(metric[10])
{} 1
clear

View file

@ -76,6 +76,21 @@ eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s)
eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time(metric1[30:10] offset 3)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)
{} 297
# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4

View file

@ -151,7 +151,42 @@ func (g *Group) Name() string { return g.name }
func (g *Group) File() string { return g.file }
// Rules returns the group's rules.
func (g *Group) Rules() []Rule { return g.rules }
func (g *Group) Rules(matcherSets ...[]*labels.Matcher) []Rule {
if len(matcherSets) == 0 {
return g.rules
}
var rules []Rule
for _, rule := range g.rules {
if matchesMatcherSets(matcherSets, rule.Labels()) {
rules = append(rules, rule)
}
}
return rules
}
func matches(lbls labels.Labels, matchers ...*labels.Matcher) bool {
for _, m := range matchers {
if v := lbls.Get(m.Name); !m.Matches(v) {
return false
}
}
return true
}
// matchesMatcherSets returns whether the given label set matches: matchers within each set are ANDed, and the sets themselves are ORed.
func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) bool {
if len(matcherSets) == 0 {
return true
}
var ok bool
for _, matchers := range matcherSets {
if matches(lbls, matchers...) {
ok = true
}
}
return ok
}
// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
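Since matcher filtering is new here, a short usage sketch of the semantics (matchers inside a set are ANDed, sets are ORed), built on the same matches helper as above:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// matches ANDs all matchers in one set against the rule's labels,
// as in the helper added above.
func matches(lbls labels.Labels, matchers ...*labels.Matcher) bool {
	for _, m := range matchers {
		if v := lbls.Get(m.Name); !m.Matches(v) {
			return false
		}
	}
	return true
}

func main() {
	ruleLabels := labels.FromStrings("severity", "page", "team", "db")

	setA := []*labels.Matcher{ // severity="page" AND team="db"
		labels.MustNewMatcher(labels.MatchEqual, "severity", "page"),
		labels.MustNewMatcher(labels.MatchEqual, "team", "db"),
	}
	setB := []*labels.Matcher{ // severity="warn"
		labels.MustNewMatcher(labels.MatchEqual, "severity", "warn"),
	}

	// Sets are ORed: the rule is selected because setA matches,
	// even though setB does not.
	fmt.Println(matches(ruleLabels, setA...) || matches(ruleLabels, setB...)) // true
}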
@ -586,14 +621,12 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
}
}
// If the rule has no dependencies, it can run concurrently because no other rules in this group depend on its output.
// Try run concurrently if there are slots available.
if ctrl := g.concurrencyController; isRuleEligibleForConcurrentExecution(rule) && ctrl.Allow() {
if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
wg.Add(1)
go eval(i, rule, func() {
wg.Done()
ctrl.Done()
ctrl.Done(ctx)
})
} else {
eval(i, rule, nil)
@ -1059,7 +1092,3 @@ func buildDependencyMap(rules []Rule) dependencyMap {
return dependencies
}
func isRuleEligibleForConcurrentExecution(rule Rule) bool {
return rule.NoDependentRules() && rule.NoDependencyRules()
}

View file

@ -380,13 +380,13 @@ func (m *Manager) RuleGroups() []*Group {
}
// Rules returns the list of the manager's rules.
func (m *Manager) Rules() []Rule {
func (m *Manager) Rules(matcherSets ...[]*labels.Matcher) []Rule {
m.mtx.RLock()
defer m.mtx.RUnlock()
var rules []Rule
for _, g := range m.groups {
rules = append(rules, g.rules...)
rules = append(rules, g.Rules(matcherSets...)...)
}
return rules
@ -457,67 +457,47 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
type RuleConcurrencyController interface {
// Allow determines whether any concurrent evaluation slots are available.
// If Allow() returns true, then Done() must be called to release the acquired slot.
Allow() bool
// Allow determines if the given rule is allowed to be evaluated concurrently.
// If Allow() returns true, then Done() must be called to release the acquired slot and perform the corresponding cleanup.
// It is important that both *Group and Rule are not retained and are used only for the duration of the call.
Allow(ctx context.Context, group *Group, rule Rule) bool
// Done releases a concurrent evaluation slot.
Done()
Done(ctx context.Context)
}
// concurrentRuleEvalController holds a weighted semaphore which controls the concurrent evaluation of rules.
type concurrentRuleEvalController struct {
sema *semaphore.Weighted
depMapsMu sync.Mutex
depMaps map[*Group]dependencyMap
sema *semaphore.Weighted
}
func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyController {
return &concurrentRuleEvalController{
sema: semaphore.NewWeighted(maxConcurrency),
depMaps: map[*Group]dependencyMap{},
sema: semaphore.NewWeighted(maxConcurrency),
}
}
func (c *concurrentRuleEvalController) RuleEligible(g *Group, r Rule) bool {
c.depMapsMu.Lock()
defer c.depMapsMu.Unlock()
depMap, found := c.depMaps[g]
if !found {
depMap = buildDependencyMap(g.rules)
c.depMaps[g] = depMap
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
// To allow a rule to be executed concurrently, we need 3 conditions:
// 1. The rule must not have any rules that depend on it.
// 2. The rule itself must not depend on any other rules.
// 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
if rule.NoDependentRules() && rule.NoDependencyRules() {
return c.sema.TryAcquire(1)
}
return depMap.isIndependent(r)
return false
}
func (c *concurrentRuleEvalController) Allow() bool {
return c.sema.TryAcquire(1)
}
func (c *concurrentRuleEvalController) Done() {
func (c *concurrentRuleEvalController) Done(_ context.Context) {
c.sema.Release(1)
}
func (c *concurrentRuleEvalController) Invalidate() {
c.depMapsMu.Lock()
defer c.depMapsMu.Unlock()
// Clear out the memoized dependency maps because some or all groups may have been updated.
c.depMaps = map[*Group]dependencyMap{}
}
// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
type sequentialRuleEvalController struct{}
func (c sequentialRuleEvalController) RuleEligible(_ *Group, _ Rule) bool {
func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) bool {
return false
}
func (c sequentialRuleEvalController) Allow() bool {
return false
}
func (c sequentialRuleEvalController) Done() {}
func (c sequentialRuleEvalController) Invalidate() {}
func (c sequentialRuleEvalController) Done(_ context.Context) {}
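For intuition, the controller's slot accounting reduces to a weighted semaphore with non-blocking acquisition; a minimal sketch of that behavior (the concurrency limit of 2 is arbitrary):

package main

import (
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	sema := semaphore.NewWeighted(2) // maxConcurrency = 2

	fmt.Println(sema.TryAcquire(1)) // true  - first slot
	fmt.Println(sema.TryAcquire(1)) // true  - second slot
	fmt.Println(sema.TryAcquire(1)) // false - no slot free, rule is evaluated sequentially

	sema.Release(1)                 // what Done() does
	fmt.Println(sema.TryAcquire(1)) // true again
}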

View file

@ -73,9 +73,11 @@ type Options struct {
// Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool
// Option to enable the experimental in-memory metadata storage and append
// metadata to the WAL.
EnableMetadataStorage bool
// Option to enable appending of scraped metadata to the TSDB or other appenders. Individual appenders
// can decide what to do with metadata, but for practical purposes this flag exists so that metadata
// can be written to the WAL and thus read for remote write.
// TODO: implement some form of metadata storage
AppendMetadata bool
// Option to increase the interval used by scrape manager to throttle target groups updates.
DiscoveryReloadInterval model.Duration
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.

View file

@ -34,6 +34,7 @@ type scrapeMetrics struct {
targetScrapePoolExceededTargetLimit prometheus.Counter
targetScrapePoolTargetLimit *prometheus.GaugeVec
targetScrapePoolTargetsAdded *prometheus.GaugeVec
targetScrapePoolSymbolTableItems *prometheus.GaugeVec
targetSyncIntervalLength *prometheus.SummaryVec
targetSyncFailed *prometheus.CounterVec
@ -129,6 +130,13 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
},
[]string{"scrape_job"},
)
sm.targetScrapePoolSymbolTableItems = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "prometheus_target_scrape_pool_symboltable_items",
Help: "Current number of symbols in table for this scrape pool.",
},
[]string{"scrape_job"},
)
sm.targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_target_scrape_pool_sync_total",
@ -234,6 +242,7 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
sm.targetScrapePoolExceededTargetLimit,
sm.targetScrapePoolTargetLimit,
sm.targetScrapePoolTargetsAdded,
sm.targetScrapePoolSymbolTableItems,
sm.targetSyncFailed,
// Used by targetScraper.
sm.targetScrapeExceededBodySizeLimit,
@ -274,6 +283,7 @@ func (sm *scrapeMetrics) Unregister() {
sm.reg.Unregister(sm.targetScrapePoolExceededTargetLimit)
sm.reg.Unregister(sm.targetScrapePoolTargetLimit)
sm.reg.Unregister(sm.targetScrapePoolTargetsAdded)
sm.reg.Unregister(sm.targetScrapePoolSymbolTableItems)
sm.reg.Unregister(sm.targetSyncFailed)
sm.reg.Unregister(sm.targetScrapeExceededBodySizeLimit)
sm.reg.Unregister(sm.targetScrapeCacheFlushForced)

View file

@ -181,7 +181,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
options.EnableNativeHistogramsIngestion,
options.EnableCreatedTimestampZeroIngestion,
options.ExtraMetrics,
options.EnableMetadataStorage,
options.AppendMetadata,
opts.target,
options.PassMetadataInContext,
metrics,
@ -246,6 +246,7 @@ func (sp *scrapePool) stop() {
sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetScrapePoolSymbolTableItems.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
}
@ -273,6 +274,15 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
sp.restartLoops(reuseCache)
oldClient.CloseIdleConnections()
sp.metrics.targetReloadIntervalLength.WithLabelValues(time.Duration(sp.config.ScrapeInterval).String()).Observe(
time.Since(start).Seconds(),
)
return nil
}
func (sp *scrapePool) restartLoops(reuseCache bool) {
var (
wg sync.WaitGroup
interval = time.Duration(sp.config.ScrapeInterval)
@ -313,7 +323,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
client: sp.client,
timeout: timeout,
bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(cfg.ScrapeProtocols),
acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
acceptEncodingHeader: acceptEncodingHeader(enableCompression),
}
newLoop = sp.newLoop(scrapeLoopOptions{
@ -352,11 +362,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
sp.targetMtx.Unlock()
wg.Wait()
oldClient.CloseIdleConnections()
sp.metrics.targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
time.Since(start).Seconds(),
)
}
// Must be called with sp.mtx held.
func (sp *scrapePool) checkSymbolTable() {
// Here we take steps to clear out the symbol table if it has grown a lot.
// After waiting some time for things to settle, we take the size of the symbol table.
// If, after some more time, the table has grown to more than twice that size, we start a new one.
@ -367,11 +376,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
} else if sp.symbolTable.Len() > 2*sp.initialSymbolTableLen {
sp.symbolTable = labels.NewSymbolTable()
sp.initialSymbolTableLen = 0
sp.restartLoops(false) // To drop all caches.
}
sp.lastSymbolTableCheck = time.Now()
}
return nil
}
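The growth check above boils down to a small predicate; a standalone sketch with an illustrative name and example sizes, not part of this change:

package main

import "fmt"

// shouldResetSymbolTable mirrors the policy above: once an initial size
// has been recorded, a table that has grown to more than twice that size
// is replaced (and loop caches are dropped via restartLoops).
func shouldResetSymbolTable(initialLen, currentLen int) bool {
    return initialLen > 0 && currentLen > 2*initialLen
}

func main() {
    fmt.Println(shouldResetSymbolTable(1000, 1800)) // false: still under 2x
    fmt.Println(shouldResetSymbolTable(1000, 2001)) // true: grown past 2x
}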
// Sync converts target groups into actual scrape targets and synchronizes
@ -408,8 +416,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
}
}
}
sp.metrics.targetScrapePoolSymbolTableItems.WithLabelValues(sp.config.JobName).Set(float64(sp.symbolTable.Len()))
sp.targetMtx.Unlock()
sp.sync(all)
sp.checkSymbolTable()
sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
time.Since(start).Seconds(),

View file

@ -10,8 +10,9 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
exit 255
fi
# TODO(bwplotka): Move to buf; this is not OSS-agnostic and likely won't work locally.
if ! [[ $(protoc --version) =~ "3.15.8" ]]; then
echo "could not find protoc 3.15.8, is it installed + in PATH?"
echo "could not find protoc 3.15.8, is it installed + in PATH? Consider commenting out this check for local flow"
exit 255
fi
@ -40,6 +41,9 @@ for dir in ${DIRS}; do
-I="${PROM_PATH}" \
-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
./*.proto
protoc --gogofast_out=plugins=grpc:. -I=. \
-I="${GOGOPROTO_PATH}" \
./io/prometheus/write/v2/*.proto
protoc --gogofast_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,paths=source_relative:. -I=. \
-I="${GOGOPROTO_PATH}" \
./io/prometheus/client/*.proto

View file

@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:

View file

@ -181,7 +181,8 @@ func TestFanoutErrors(t *testing.T) {
require.NotEmpty(t, ss.Warnings(), "warnings expected")
w := ss.Warnings()
require.Error(t, w.AsErrors()[0])
require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
warn, _ := w.AsStrings("", 0, 0)
require.Equal(t, tc.warning.Error(), warn[0])
}
})
t.Run("chunks", func(t *testing.T) {
@ -207,7 +208,8 @@ func TestFanoutErrors(t *testing.T) {
require.NotEmpty(t, ss.Warnings(), "warnings expected")
w := ss.Warnings()
require.Error(t, w.AsErrors()[0])
require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
warn, _ := w.AsStrings("", 0, 0)
require.Equal(t, tc.warning.Error(), warn[0])
}
})
}
@ -236,11 +238,11 @@ func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels
return storage.ErrSeriesSet(errSelect)
}
func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label values error")
}
func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label names error")
}

View file

@ -122,11 +122,11 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}
func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
@ -161,12 +161,12 @@ type LabelQuerier interface {
// It is not safe to use the strings beyond the lifetime of the querier.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// LabelNames returns all the unique label names present in the block in sorted order.
// If matchers are specified the returned result set is reduced
// to label names of metrics matching the matchers.
LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// Close releases the resources of the Querier.
Close() error
@ -190,6 +190,9 @@ type SelectHints struct {
Start int64 // Start time in milliseconds for this select.
End int64 // End time in milliseconds for this select.
// Maximum number of results returned. Use a value of 0 to disable.
Limit int
Step int64 // Query step size in milliseconds.
Func string // String representation of surrounding function or aggregation.
@ -217,6 +220,13 @@ type SelectHints struct {
DisableTrimming bool
}
// LabelHints specifies hints passed for label reads.
// These hints are advisory only; implementations may use or ignore them.
type LabelHints struct {
// Maximum number of results returned. Use a value of 0 to disable.
Limit int
}
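A hypothetical call-site sketch of the updated signatures; since LabelHints is advisory, callers needing a hard cap should still truncate results themselves. The helper name and the 100-result limit are assumptions.

package main

import (
    "context"
    "fmt"

    "github.com/prometheus/prometheus/storage"
)

// listLabels threads the new hints parameter through both label reads.
// The Limit is an upper bound the implementation may honor, not a guarantee.
func listLabels(ctx context.Context, q storage.Querier) error {
    hints := &storage.LabelHints{Limit: 100}
    names, _, err := q.LabelNames(ctx, hints)
    if err != nil {
        return err
    }
    values, _, err := q.LabelValues(ctx, "job", hints)
    if err != nil {
        return err
    }
    fmt.Println("names:", names, "job values:", values)
    return nil
}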
// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.

View file

@ -136,6 +136,11 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist
return b.it.AtFloatHistogram(nil)
}
// AtT returns the timestamp of the current element of the iterator.
func (b *MemoizedSeriesIterator) AtT() int64 {
return b.it.AtT()
}
// Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error {
return b.it.Err()

View file

@ -29,13 +29,15 @@ func TestMemoizedSeriesIterator(t *testing.T) {
sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
if efh == nil {
ts, v := it.At()
require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, ev, v, "value mismatch")
require.Equal(t, ets, ts, "At() timestamp mismatch")
require.Equal(t, ev, v, "At() value mismatch")
} else {
ts, fh := it.AtFloatHistogram()
require.Equal(t, ets, ts, "timestamp mismatch")
require.Equal(t, efh, fh, "histogram mismatch")
require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch")
require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch")
}
require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch")
}
prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
ts, v, fh, ok := it.PeekPrev()

View file

@ -45,25 +45,24 @@ type mergeGenericQuerier struct {
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, the merge function will be used.
func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMergeFunc) Querier {
primaries = filterQueriers(primaries)
secondaries = filterQueriers(secondaries)
switch {
case len(primaries)+len(secondaries) == 0:
case len(primaries) == 0 && len(secondaries) == 0:
return noopQuerier{}
case len(primaries) == 1 && len(secondaries) == 0:
return primaries[0]
case len(primaries) == 0 && len(secondaries) == 1:
return secondaries[0]
return &querierAdapter{newSecondaryQuerierFrom(secondaries[0])}
}
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries {
if _, ok := q.(noopQuerier); !ok && q != nil {
queriers = append(queriers, newGenericQuerierFrom(q))
}
queriers = append(queriers, newGenericQuerierFrom(q))
}
for _, q := range secondaries {
if _, ok := q.(noopQuerier); !ok && q != nil {
queriers = append(queriers, newSecondaryQuerierFrom(q))
}
queriers = append(queriers, newSecondaryQuerierFrom(q))
}
concurrentSelect := false
@ -77,31 +76,40 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
}}
}
func filterQueriers(qs []Querier) []Querier {
ret := make([]Querier, 0, len(qs))
for _, q := range qs {
if _, ok := q.(noopQuerier); !ok && q != nil {
ret = append(ret, q)
}
}
return ret
}
// NewMergeChunkQuerier returns a new Chunk Querier that merges results of given primary and secondary chunk queriers.
// See NewFanout commentary to learn more about primary vs secondary differences.
//
// In case of overlaps between the data given by primaries' and secondaries' Selects, the merge function will be used.
// TODO(bwplotka): Currently merge will compact overlapping chunks with bigger chunk, without limit. Split it: https://github.com/prometheus/tsdb/issues/670
func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn VerticalChunkSeriesMergeFunc) ChunkQuerier {
primaries = filterChunkQueriers(primaries)
secondaries = filterChunkQueriers(secondaries)
switch {
case len(primaries) == 0 && len(secondaries) == 0:
return noopChunkQuerier{}
case len(primaries) == 1 && len(secondaries) == 0:
return primaries[0]
case len(primaries) == 0 && len(secondaries) == 1:
return secondaries[0]
return &chunkQuerierAdapter{newSecondaryQuerierFromChunk(secondaries[0])}
}
queriers := make([]genericQuerier, 0, len(primaries)+len(secondaries))
for _, q := range primaries {
if _, ok := q.(noopChunkQuerier); !ok && q != nil {
queriers = append(queriers, newGenericQuerierFromChunk(q))
}
queriers = append(queriers, newGenericQuerierFromChunk(q))
}
for _, querier := range secondaries {
if _, ok := querier.(noopChunkQuerier); !ok && querier != nil {
queriers = append(queriers, newSecondaryQuerierFromChunk(querier))
}
for _, q := range secondaries {
queriers = append(queriers, newSecondaryQuerierFromChunk(q))
}
concurrentSelect := false
@ -115,6 +123,16 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
}}
}
func filterChunkQueriers(qs []ChunkQuerier) []ChunkQuerier {
ret := make([]ChunkQuerier, 0, len(qs))
for _, q := range qs {
if _, ok := q.(noopChunkQuerier); !ok && q != nil {
ret = append(ret, q)
}
}
return ret
}
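A minimal wiring sketch under the new filtering behavior; the helper name is an assumption, while ChainedSeriesMerge is the stock merge function for overlapping series. Primaries stay authoritative (their errors fail the query), and secondary failures are demoted to warnings, as the error-handling tests below exercise.

package main

import "github.com/prometheus/prometheus/storage"

// mergeLocalAndRemote merges an authoritative local querier with a
// best-effort remote one; nil or noop queriers are now filtered out
// up front by NewMergeQuerier.
func mergeLocalAndRemote(localQ, remoteQ storage.Querier) storage.Querier {
    return storage.NewMergeQuerier(
        []storage.Querier{localQ},  // Primary: errors are fatal.
        []storage.Querier{remoteQ}, // Secondary: errors become warnings.
        storage.ChainedSeriesMerge, // Resolves overlapping series.
    )
}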
// Select returns a set of series that matches the given label matchers.
func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) genericSeriesSet {
seriesSets := make([]genericSeriesSet, 0, len(q.queriers))
@ -169,8 +187,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, ws, err := q.lvals(ctx, q.queriers, name, matchers...)
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...)
if err != nil {
return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
}
@ -178,22 +196,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matc
}
// lvals performs merge sort for LabelValues from multiple queriers.
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if lq.Len() == 0 {
return nil, nil, nil
}
if lq.Len() == 1 {
return lq.Get(0).LabelValues(ctx, n, matchers...)
return lq.Get(0).LabelValues(ctx, n, hints, matchers...)
}
a, b := lq.SplitByHalf()
var ws annotations.Annotations
s1, w, err := q.lvals(ctx, a, n, matchers...)
s1, w, err := q.lvals(ctx, a, n, hints, matchers...)
ws.Merge(w)
if err != nil {
return nil, ws, err
}
s2, ws, err := q.lvals(ctx, b, n, matchers...)
s2, ws, err := q.lvals(ctx, b, n, hints, matchers...)
ws.Merge(w)
if err != nil {
return nil, ws, err
@ -229,13 +247,13 @@ func mergeStrings(a, b []string) []string {
}
// LabelNames returns all the unique label names present in all queriers in sorted order.
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
var (
labelNamesMap = make(map[string]struct{})
warnings annotations.Annotations
)
for _, querier := range q.queriers {
names, wrn, err := querier.LabelNames(ctx, matchers...)
names, wrn, err := querier.LabelNames(ctx, hints, matchers...)
if wrn != nil {
// TODO(bwplotka): We could potentially wrap warnings.
warnings.Merge(wrn)

View file

@ -912,9 +912,23 @@ func TestConcatenatingChunkIterator(t *testing.T) {
}
type mockQuerier struct {
LabelQuerier
mtx sync.Mutex
toReturn []Series
toReturn []Series // Response for Select.
closed bool
labelNamesCalls int
labelNamesRequested []labelNameRequest
sortedSeriesRequested []bool
resp []string // Response for LabelNames and LabelValues; turned into Select response if toReturn is not supplied.
warnings annotations.Annotations
err error
}
type labelNameRequest struct {
name string
matchers []*labels.Matcher
}
type seriesByLabel []Series
@ -924,13 +938,47 @@ func (a seriesByLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a seriesByLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 }
func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) SeriesSet {
cpy := make([]Series, len(m.toReturn))
copy(cpy, m.toReturn)
m.mtx.Lock()
defer m.mtx.Unlock()
m.sortedSeriesRequested = append(m.sortedSeriesRequested, sortSeries)
var ret []Series
if len(m.toReturn) > 0 {
ret = make([]Series, len(m.toReturn))
copy(ret, m.toReturn)
} else if len(m.resp) > 0 {
ret = make([]Series, 0, len(m.resp))
for _, l := range m.resp {
ret = append(ret, NewListSeries(labels.FromStrings("test", l), nil))
}
}
if sortSeries {
sort.Sort(seriesByLabel(cpy))
sort.Sort(seriesByLabel(ret))
}
return NewMockSeriesSet(cpy...)
return &mockSeriesSet{idx: -1, series: ret, warnings: m.warnings, err: m.err}
}
func (m *mockQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name,
matchers: matchers,
})
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesCalls++
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockQuerier) Close() error {
m.closed = true
return nil
}
type mockChunkQuerier struct {
@ -960,6 +1008,9 @@ func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectH
type mockSeriesSet struct {
idx int
series []Series
warnings annotations.Annotations
err error
}
func NewMockSeriesSet(series ...Series) SeriesSet {
@ -970,15 +1021,18 @@ func NewMockSeriesSet(series ...Series) SeriesSet {
}
func (m *mockSeriesSet) Next() bool {
if m.err != nil {
return false
}
m.idx++
return m.idx < len(m.series)
}
func (m *mockSeriesSet) At() Series { return m.series[m.idx] }
func (m *mockSeriesSet) Err() error { return nil }
func (m *mockSeriesSet) Err() error { return m.err }
func (m *mockSeriesSet) Warnings() annotations.Annotations { return nil }
func (m *mockSeriesSet) Warnings() annotations.Annotations { return m.warnings }
type mockChunkSeriesSet struct {
idx int
@ -1336,105 +1390,44 @@ func BenchmarkMergeSeriesSet(b *testing.B) {
}
}
type mockGenericQuerier struct {
mtx sync.Mutex
closed bool
labelNamesCalls int
labelNamesRequested []labelNameRequest
sortedSeriesRequested []bool
resp []string
warnings annotations.Annotations
err error
}
type labelNameRequest struct {
name string
matchers []*labels.Matcher
}
func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _ ...*labels.Matcher) genericSeriesSet {
m.mtx.Lock()
m.sortedSeriesRequested = append(m.sortedSeriesRequested, b)
m.mtx.Unlock()
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
}
func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name,
matchers: matchers,
})
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesCalls++
m.mtx.Unlock()
return m.resp, m.warnings, m.err
}
func (m *mockGenericQuerier) Close() error {
m.closed = true
return nil
}
type mockGenericSeriesSet struct {
resp []string
warnings annotations.Annotations
err error
curr int
}
func (m *mockGenericSeriesSet) Next() bool {
if m.err != nil {
return false
func visitMockQueriers(t *testing.T, qr Querier, f func(t *testing.T, q *mockQuerier)) int {
count := 0
switch x := qr.(type) {
case *mockQuerier:
count++
f(t, x)
case *querierAdapter:
count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f)
}
if m.curr >= len(m.resp) {
return false
return count
}
func visitMockQueriersInGenericQuerier(t *testing.T, g genericQuerier, f func(t *testing.T, q *mockQuerier)) int {
count := 0
switch x := g.(type) {
case *mergeGenericQuerier:
for _, q := range x.queriers {
count += visitMockQueriersInGenericQuerier(t, q, f)
}
case *genericQuerierAdapter:
// Visitor for chunkQuerier not implemented.
count += visitMockQueriers(t, x.q, f)
case *secondaryQuerier:
count += visitMockQueriersInGenericQuerier(t, x.genericQuerier, f)
}
m.curr++
return true
return count
}
func (m *mockGenericSeriesSet) Err() error { return m.err }
func (m *mockGenericSeriesSet) Warnings() annotations.Annotations { return m.warnings }
func (m *mockGenericSeriesSet) At() Labels {
return mockLabels(m.resp[m.curr-1])
}
type mockLabels string
func (l mockLabels) Labels() labels.Labels {
return labels.FromStrings("test", string(l))
}
func unwrapMockGenericQuerier(t *testing.T, qr genericQuerier) *mockGenericQuerier {
m, ok := qr.(*mockGenericQuerier)
if !ok {
s, ok := qr.(*secondaryQuerier)
require.True(t, ok, "expected secondaryQuerier got something else")
m, ok = s.genericQuerier.(*mockGenericQuerier)
require.True(t, ok, "expected mockGenericQuerier got something else")
}
return m
}
func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
func TestMergeQuerierWithSecondaries_ErrorHandling(t *testing.T) {
var (
errStorage = errors.New("storage error")
warnStorage = errors.New("storage warning")
ctx = context.Background()
)
for _, tcase := range []struct {
name string
queriers []genericQuerier
name string
primaries []Querier
secondaries []Querier
expectedSelectsSeries []labels.Labels
expectedLabels []string
@ -1443,10 +1436,8 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
expectedErrs [4]error
}{
{
// NewMergeQuerier will not create a mergeGenericQuerier
// with just one querier inside, but we can test it anyway.
name: "one successful primary querier",
queriers: []genericQuerier{&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
name: "one successful primary querier",
primaries: []Querier{&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil}},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
labels.FromStrings("test", "b"),
@ -1455,9 +1446,9 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
},
{
name: "multiple successful primary queriers",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
&mockGenericQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
primaries: []Querier{
&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
&mockQuerier{resp: []string{"b", "c"}, warnings: nil, err: nil},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
@ -1468,15 +1459,17 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
},
{
name: "one failed primary querier",
queriers: []genericQuerier{&mockGenericQuerier{warnings: nil, err: errStorage}},
primaries: []Querier{&mockQuerier{warnings: nil, err: errStorage}},
expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage},
},
{
name: "one successful primary querier with successful secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
primaries: []Querier{
&mockQuerier{resp: []string{"a", "b"}, warnings: nil, err: nil},
},
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
&mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
@ -1487,10 +1480,12 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
},
{
name: "one successful primary querier with empty response and successful secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{}, warnings: nil, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
primaries: []Querier{
&mockQuerier{resp: []string{}, warnings: nil, err: nil},
},
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
&mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "b"),
@ -1500,19 +1495,42 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
},
{
name: "one failed primary querier with successful secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{warnings: nil, err: errStorage},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: nil}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: nil}},
primaries: []Querier{
&mockQuerier{warnings: nil, err: errStorage},
},
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: nil, err: nil},
&mockQuerier{resp: []string{"c"}, warnings: nil, err: nil},
},
expectedErrs: [4]error{errStorage, errStorage, errStorage, errStorage},
},
{
name: "nil primary querier with failed secondary",
primaries: nil,
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
},
expectedLabels: []string{},
expectedWarnings: annotations.New().Add(errStorage),
},
{
name: "nil primary querier with two failed secondaries",
primaries: nil,
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
&mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage},
},
expectedLabels: []string{},
expectedWarnings: annotations.New().Add(errStorage),
},
{
name: "one successful primary querier with failed secondaries",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a"}, warnings: nil, err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: nil, err: errStorage}},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"c"}, warnings: nil, err: errStorage}},
primaries: []Querier{
&mockQuerier{resp: []string{"a"}, warnings: nil, err: nil},
},
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: nil, err: errStorage},
&mockQuerier{resp: []string{"c"}, warnings: nil, err: errStorage},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
@ -1522,9 +1540,11 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
},
{
name: "successful queriers with warnings",
queriers: []genericQuerier{
&mockGenericQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
&secondaryQuerier{genericQuerier: &mockGenericQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil}},
primaries: []Querier{
&mockQuerier{resp: []string{"a"}, warnings: annotations.New().Add(warnStorage), err: nil},
},
secondaries: []Querier{
&mockQuerier{resp: []string{"b"}, warnings: annotations.New().Add(warnStorage), err: nil},
},
expectedSelectsSeries: []labels.Labels{
labels.FromStrings("test", "a"),
@ -1535,10 +1555,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
},
} {
t.Run(tcase.name, func(t *testing.T) {
q := &mergeGenericQuerier{
queriers: tcase.queriers,
mergeFn: func(l ...Labels) Labels { return l[0] },
}
q := NewMergeQuerier(tcase.primaries, tcase.secondaries, func(s ...Series) Series { return s[0] })
t.Run("Select", func(t *testing.T) {
res := q.Select(context.Background(), false, nil)
@ -1551,65 +1568,70 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
require.ErrorIs(t, res.Err(), tcase.expectedErrs[0], "expected error doesn't match")
require.Equal(t, tcase.expectedSelectsSeries, lbls)
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
// mergeGenericQuerier forces all Selects to be sorted.
require.Equal(t, []bool{true}, m.sortedSeriesRequested)
}
n := visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
// Single queries should be unsorted; merged queries sorted.
exp := len(tcase.primaries)+len(tcase.secondaries) > 1
require.Equal(t, []bool{exp}, m.sortedSeriesRequested)
})
// Check we visited all queriers.
require.Equal(t, len(tcase.primaries)+len(tcase.secondaries), n)
})
t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames(ctx)
res, w, err := q.LabelNames(ctx, nil)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
requireEqualSlice(t, tcase.expectedLabels, res)
if err != nil {
return
}
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
require.Equal(t, 1, m.labelNamesCalls)
}
})
})
t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues(ctx, "test")
res, w, err := q.LabelValues(ctx, "test", nil)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
requireEqualSlice(t, tcase.expectedLabels, res)
if err != nil {
return
}
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
require.Equal(t, []labelNameRequest{{name: "test"}}, m.labelNamesRequested)
}
})
})
t.Run("LabelValuesWithMatchers", func(t *testing.T) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
res, w, err := q.LabelValues(ctx, "test2", matcher)
res, w, err := q.LabelValues(ctx, "test2", nil, matcher)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
requireEqualSlice(t, tcase.expectedLabels, res)
if err != nil {
return
}
for _, qr := range q.queriers {
m := unwrapMockGenericQuerier(t, qr)
visitMockQueriers(t, q, func(t *testing.T, m *mockQuerier) {
require.Equal(t, []labelNameRequest{
{name: "test"},
{name: "test2", matchers: []*labels.Matcher{matcher}},
}, m.labelNamesRequested)
}
})
})
})
}
}
// Check slices for equality, ignoring the difference between nil and empty.
func requireEqualSlice[T any](t require.TestingT, a, b []T, msgAndArgs ...interface{}) {
if len(a) == 0 {
require.Empty(t, b, msgAndArgs...)
} else {
require.Equal(t, a, b, msgAndArgs...)
}
}
type errIterator struct {
err error
}

View file

@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche
return NoopSeriesSet()
}
func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M
return NoopChunkedSeriesSet()
}
func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}

View file

@ -14,7 +14,6 @@
package remote
import (
"bufio"
"bytes"
"context"
"fmt"
@ -35,13 +34,40 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/trace"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/storage/remote/azuread"
)
const maxErrMsgLen = 1024
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
const (
RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version"
RemoteWriteVersion1HeaderValue = "0.1.0"
RemoteWriteVersion20HeaderValue = "2.0.0"
appProtoContentType = "application/x-protobuf"
)
// Compression represents the encoding. Currently remote storage supports only
// one, but we are experimenting with more, so the compression scaffolding is
// kept for now.
// NOTE(bwplotka): Keeping it public, as non-stable help for importers to use.
type Compression string
const (
// SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt
SnappyBlockCompression Compression = "snappy"
)
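For reference, a sketch of how a payload matching SnappyBlockCompression is typically produced before being handed to Store; the helper name is illustrative and not part of this change.

package main

import (
    "github.com/gogo/protobuf/proto"
    "github.com/golang/snappy"

    "github.com/prometheus/prometheus/prompb"
)

// encodeWriteRequest marshals a v1 write request and compresses it with
// the snappy block format, matching the Content-Encoding header sent by
// the client.
func encodeWriteRequest(req *prompb.WriteRequest) ([]byte, error) {
    raw, err := proto.Marshal(req)
    if err != nil {
        return nil, err
    }
    return snappy.Encode(nil, raw), nil
}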
var (
// UserAgent represents the Prometheus version to use for the User-Agent header.
UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
}
)
var (
remoteReadQueriesTotal = prometheus.NewCounterVec(
@ -93,6 +119,9 @@ type Client struct {
readQueries prometheus.Gauge
readQueriesTotal *prometheus.CounterVec
readQueriesDuration prometheus.Observer
writeProtoMsg config.RemoteWriteProtoMsg
writeCompression Compression // Not exposed by ClientConfig for now.
}
// ClientConfig configures a client.
@ -104,6 +133,7 @@ type ClientConfig struct {
AzureADConfig *azuread.AzureADConfig
Headers map[string]string
RetryOnRateLimit bool
WriteProtoMsg config.RemoteWriteProtoMsg
}
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
@ -162,14 +192,20 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
}
}
httpClient.Transport = otelhttp.NewTransport(t)
writeProtoMsg := config.RemoteWriteProtoMsgV1
if conf.WriteProtoMsg != "" {
writeProtoMsg = conf.WriteProtoMsg
}
httpClient.Transport = otelhttp.NewTransport(t)
return &Client{
remoteName: name,
urlString: conf.URL.String(),
Client: httpClient,
retryOnRateLimit: conf.RetryOnRateLimit,
timeout: time.Duration(conf.Timeout),
writeProtoMsg: writeProtoMsg,
writeCompression: SnappyBlockCompression,
}, nil
}
@ -198,18 +234,24 @@ type RecoverableError struct {
// Store sends a batch of samples to the HTTP endpoint; the request body is the
// proto-marshalled and encoded bytes from codec.go.
func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteResponseStats, error) {
httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req))
if err != nil {
// Errors from NewRequest are from unparsable URLs, so are not
// recoverable.
return err
return WriteResponseStats{}, err
}
httpReq.Header.Add("Content-Encoding", "snappy")
httpReq.Header.Set("Content-Type", "application/x-protobuf")
httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg])
httpReq.Header.Set("User-Agent", UserAgent)
httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 {
// Compatibility mode for 1.0.
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
} else {
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
}
if attempt > 0 {
httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
}
@ -224,28 +266,34 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
if err != nil {
// Errors from Client.Do are from (for example) network errors, so are
// recoverable.
return RecoverableError{err, defaultBackoff}
return WriteResponseStats{}, RecoverableError{err, defaultBackoff}
}
defer func() {
io.Copy(io.Discard, httpResp.Body)
httpResp.Body.Close()
}()
// TODO(bwplotka): Pass logger and emit debug on error?
// A parsing error means there were some response header values we couldn't
// parse; we can continue handling the response.
rs, _ := ParseWriteResponseStats(httpResp)
//nolint:usestdlibvars
if httpResp.StatusCode/100 != 2 {
scanner := bufio.NewScanner(io.LimitReader(httpResp.Body, maxErrMsgLen))
line := ""
if scanner.Scan() {
line = scanner.Text()
}
err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, line)
if httpResp.StatusCode/100 == 2 {
return rs, nil
}
// Handle errors, e.g. read the potential error message from the body.
// TODO(bwplotka): Pass logger and emit debug on error?
body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen))
err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body)
//nolint:usestdlibvars
if httpResp.StatusCode/100 == 5 ||
(c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) {
return RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))}
}
return err
return rs, err
}
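A hedged in-package sketch of calling the new Store signature: stats can come back alongside an error, and recoverable errors carry a suggested retry delay. The helper name is an assumption, not part of this change.

package remote

import (
    "context"
    "errors"
    "fmt"
)

// storeOnce sends one encoded, compressed payload and reports the
// confirmed-write statistics parsed from the response headers.
func storeOnce(ctx context.Context, c WriteClient, payload []byte, attempt int) error {
    stats, err := c.Store(ctx, payload, attempt)
    if err != nil {
        var re RecoverableError
        if errors.As(err, &re) {
            // The caller may back off and retry with attempt+1.
        }
        return err
    }
    fmt.Printf("confirmed write stats: %+v\n", stats)
    return nil
}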
// retryAfterDuration returns the duration for the Retry-After header. In case of any errors, it
@ -265,12 +313,12 @@ func retryAfterDuration(t string) model.Duration {
}
// Name uniquely identifies the client.
func (c Client) Name() string {
func (c *Client) Name() string {
return c.remoteName
}
// Endpoint is the remote read or write endpoint.
func (c Client) Endpoint() string {
func (c *Client) Endpoint() string {
return c.urlString
}

View file

@ -73,7 +73,7 @@ func TestStoreHTTPErrorHandling(t *testing.T) {
c, err := NewWriteClient(hash, conf)
require.NoError(t, err)
err = c.Store(context.Background(), []byte{}, 0)
_, err = c.Store(context.Background(), []byte{}, 0)
if test.err != nil {
require.EqualError(t, err, test.err.Error())
} else {
@ -133,7 +133,7 @@ func TestClientRetryAfter(t *testing.T) {
c := getClient(getClientConfig(serverURL, tc.retryOnRateLimit))
var recErr RecoverableError
err = c.Store(context.Background(), []byte{}, 0)
_, err = c.Store(context.Background(), []byte{}, 0)
require.Equal(t, tc.expectedRecoverable, errors.As(err, &recErr), "Mismatch in expected recoverable error status.")
if tc.expectedRecoverable {
require.Equal(t, tc.expectedRetryAfter, recErr.retryAfter)
@ -169,7 +169,7 @@ func TestRetryAfterDuration(t *testing.T) {
}
}
func TestClientHeaders(t *testing.T) {
func TestClientCustomHeaders(t *testing.T) {
headersToSend := map[string]string{"Foo": "Bar", "Baz": "qux"}
var called bool
@ -203,7 +203,7 @@ func TestClientHeaders(t *testing.T) {
c, err := NewWriteClient("c", conf)
require.NoError(t, err)
err = c.Store(context.Background(), []byte{}, 0)
_, err = c.Store(context.Background(), []byte{}, 0)
require.NoError(t, err)
require.True(t, called, "The remote server wasn't called")

View file

@ -22,7 +22,6 @@ import (
"net/http"
"slices"
"sort"
"strings"
"sync"
"github.com/gogo/protobuf/proto"
@ -30,10 +29,10 @@ import (
"github.com/prometheus/common/model"
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
@ -153,10 +152,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
})
case chunkenc.ValHistogram:
ts, h := iter.AtHistogram(nil)
histograms = append(histograms, HistogramToHistogramProto(ts, h))
histograms = append(histograms, prompb.FromIntHistogram(ts, h))
case chunkenc.ValFloatHistogram:
ts, fh := iter.AtFloatHistogram(nil)
histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
histograms = append(histograms, prompb.FromFloatHistogram(ts, fh))
default:
return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
}
@ -166,7 +165,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
}
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
Labels: LabelsToLabelsProto(series.Labels(), nil),
Labels: prompb.FromLabels(series.Labels(), nil),
Samples: samples,
Histograms: histograms,
})
@ -182,7 +181,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
if err := validateLabelsAndMetricName(ts.Labels); err != nil {
return errSeriesSet{err: err}
}
lbls := LabelProtosToLabels(&b, ts.Labels)
lbls := ts.ToLabels(&b, nil)
series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
}
@ -235,7 +234,7 @@ func StreamChunkedReadResponses(
for ss.Next() {
series := ss.At()
iter = series.Iterator(iter)
lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels)
maxDataLength := maxBytesInFrame
for _, lbl := range lbls {
@ -481,21 +480,16 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
panic("iterator is not on an integer histogram sample")
}
h := c.series.histograms[c.histogramsCur]
return h.Timestamp, HistogramProtoToHistogram(h)
return h.Timestamp, h.ToIntHistogram()
}
// AtFloatHistogram implements chunkenc.Iterator.
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
switch c.curValType {
case chunkenc.ValHistogram:
if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
fh := c.series.histograms[c.histogramsCur]
return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
case chunkenc.ValFloatHistogram:
fh := c.series.histograms[c.histogramsCur]
return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
default:
panic("iterator is not on a histogram sample")
return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
}
panic("iterator is not on a histogram sample")
}
// AtT implements chunkenc.Iterator.
@ -618,141 +612,6 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
return result, nil
}
func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemplar.Exemplar {
timestamp := ep.Timestamp
return exemplar.Exemplar{
Labels: LabelProtosToLabels(b, ep.Labels),
Value: ep.Value,
Ts: timestamp,
HasTs: timestamp != 0,
}
}
// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
// provided proto message. The caller has to make sure that the proto message
// represents an integer histogram and not a float histogram, or it panics.
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
if hp.IsFloatHistogram() {
panic("HistogramProtoToHistogram called with a float histogram")
}
return &histogram.Histogram{
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
Schema: hp.Schema,
ZeroThreshold: hp.ZeroThreshold,
ZeroCount: hp.GetZeroCountInt(),
Count: hp.GetCountInt(),
Sum: hp.Sum,
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
PositiveBuckets: hp.GetPositiveDeltas(),
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
NegativeBuckets: hp.GetNegativeDeltas(),
}
}
// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
// provided proto message to a Float Histogram. The caller has to make sure that
// the proto message represents a float histogram and not an integer histogram,
// or it panics.
func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
if !hp.IsFloatHistogram() {
panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
}
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
Schema: hp.Schema,
ZeroThreshold: hp.ZeroThreshold,
ZeroCount: hp.GetZeroCountFloat(),
Count: hp.GetCountFloat(),
Sum: hp.Sum,
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
PositiveBuckets: hp.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
NegativeBuckets: hp.GetNegativeCounts(),
}
}
// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message
// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a
// float histogram, or it panics.
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
if hp.IsFloatHistogram() {
panic("HistogramProtoToFloatHistogram called with a float histogram")
}
return &histogram.FloatHistogram{
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
Schema: hp.Schema,
ZeroThreshold: hp.ZeroThreshold,
ZeroCount: float64(hp.GetZeroCountInt()),
Count: float64(hp.GetCountInt()),
Sum: hp.Sum,
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()),
}
}
func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
for i := 0; i < len(s); i++ {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
func deltasToCounts(deltas []int64) []float64 {
counts := make([]float64, len(deltas))
var cur float64
for i, d := range deltas {
cur += float64(d)
counts[i] = cur
}
return counts
}
func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
return prompb.Histogram{
Count: &prompb.Histogram_CountInt{CountInt: h.Count},
Sum: h.Sum,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
NegativeSpans: spansToSpansProto(h.NegativeSpans),
NegativeDeltas: h.NegativeBuckets,
PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets,
ResetHint: prompb.Histogram_ResetHint(h.CounterResetHint),
Timestamp: timestamp,
}
}
func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
return prompb.Histogram{
Count: &prompb.Histogram_CountFloat{CountFloat: fh.Count},
Sum: fh.Sum,
Schema: fh.Schema,
ZeroThreshold: fh.ZeroThreshold,
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
NegativeCounts: fh.NegativeBuckets,
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: prompb.Histogram_ResetHint(fh.CounterResetHint),
Timestamp: timestamp,
}
}
func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
spans := make([]prompb.BucketSpan, len(s))
for i := 0; i < len(s); i++ {
spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
return spans
}
// LabelProtosToMetric unpacks a []*prompb.Label to a model.Metric.
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
metric := make(model.Metric, len(labelPairs))
@ -762,43 +621,9 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
return metric
}
// LabelProtosToLabels transforms prompb labels into labels. The labels builder
// will be used to build the returned labels.
func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels {
b.Reset()
for _, l := range labelPairs {
b.Add(l.Name, l.Value)
}
b.Sort()
return b.Labels()
}
// LabelsToLabelsProto transforms labels into prompb labels. The buffer slice
// will be used to avoid allocations if it is big enough to store the labels.
func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
result := buf[:0]
lbls.Range(func(l labels.Label) {
result = append(result, prompb.Label{
Name: l.Name,
Value: l.Value,
})
})
return result
}
// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType {
mt := strings.ToUpper(string(t))
v, ok := prompb.MetricMetadata_MetricType_value[mt]
if !ok {
return prompb.MetricMetadata_UNKNOWN
}
return prompb.MetricMetadata_MetricType(v)
}
// DecodeWriteRequest decodes an io.Reader into a prompb.WriteRequest, handling
// snappy decompression.
// Also used by documentation/examples/remote_storage.
func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
compressed, err := io.ReadAll(r)
if err != nil {
@ -818,6 +643,28 @@ func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
return &req, nil
}
// DecodeWriteV2Request decodes an io.Reader into a writev2.Request, handling
// snappy decompression.
// Also used by documentation/examples/remote_storage.
func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) {
compressed, err := io.ReadAll(r)
if err != nil {
return nil, err
}
reqBuf, err := snappy.Decode(nil, compressed)
if err != nil {
return nil, err
}
var req writev2.Request
if err := proto.Unmarshal(reqBuf, &req); err != nil {
return nil, err
}
return &req, nil
}
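A sketch of a minimal receiver built on DecodeWriteV2Request; the handler itself is hypothetical, while ToLabels resolving LabelsRefs against req.Symbols matches the v2 symbol-table scheme used elsewhere in this change.

package remote

import (
    "fmt"
    "net/http"

    "github.com/prometheus/prometheus/model/labels"
)

// handleV2Write decodes a snappy-compressed io.prometheus.write.v2.Request
// from the HTTP body and resolves each series' symbolized labels.
func handleV2Write(w http.ResponseWriter, r *http.Request) {
    req, err := DecodeWriteV2Request(r.Body)
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    b := labels.NewScratchBuilder(0)
    for _, ts := range req.Timeseries {
        // LabelsRefs index into req.Symbols; ToLabels resolves them.
        lbls := ts.ToLabels(&b, req.Symbols)
        fmt.Println(lbls.String(), "samples:", len(ts.Samples))
    }
    w.WriteHeader(http.StatusNoContent)
}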
func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) {
contentType := r.Header.Get("Content-Type")
var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error)

View file

@ -19,13 +19,16 @@ import (
"sync"
"testing"
"github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/metadata"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
@ -33,45 +36,128 @@ import (
"github.com/prometheus/prometheus/util/annotations"
)
var testHistogram = histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 0,
Sum: 20,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{-1},
}
var (
testHistogram = histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
Count: 0,
Sum: 20,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{1},
NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}},
NegativeBuckets: []int64{-1},
}
var writeRequestFixture = &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
writeRequestFixture = &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
},
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
{
Labels: []prompb.Label{
{Name: "__name__", Value: "test_metric1"},
{Name: "b", Value: "c"},
{Name: "baz", Value: "qux"},
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat(nil))},
},
},
}
writeV2RequestSeries1Metadata = metadata.Metadata{
Type: model.MetricTypeGauge,
Help: "Test gauge for test purposes",
Unit: "Maybe op/sec who knows (:",
}
writeV2RequestSeries2Metadata = metadata.Metadata{
Type: model.MetricTypeCounter,
Help: "Test counter for test purposes",
}
// writeV2RequestFixture represents the same request as writeRequestFixture,
// but using the v2 representation, and additionally includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata.
// NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
writeV2RequestFixture = &writev2.Request{
Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
Timeseries: []writev2.TimeSeries{
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type.
HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type.
HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help.
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
}
)
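In the v2 format, every string appears exactly once in Symbols and series reference strings by index: LabelsRefs holds alternating name/value indexes into that table. A minimal sketch of resolving a series' labels by hand (the helper name is illustrative, not part of the package):

func labelsFromRefs(symbols []string, refs []uint32) map[string]string {
	// refs alternates label-name and label-value indexes into symbols.
	lbls := make(map[string]string, len(refs)/2)
	for i := 0; i+1 < len(refs); i += 2 {
		lbls[symbols[refs[i]]] = symbols[refs[i+1]]
	}
	return lbls
}

Applied to the fixture above, labelsFromRefs(writeV2RequestFixture.Symbols, writeV2RequestFixture.Timeseries[0].LabelsRefs) yields {"__name__": "test_metric1", "b": "c", "baz": "qux", "d": "e", "foo": "bar"}.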
func TestWriteV2RequestFixture(t *testing.T) {
// Dynamically generate writeV2RequestFixture, reusing v1 fixture elements.
st := writev2.NewSymbolTable()
b := labels.NewScratchBuilder(0)
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
expected := &writev2.Request{
Timeseries: []writev2.TimeSeries{
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE,
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER,
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
Symbols: st.Symbols(),
}
// Check that it matches the static writeV2RequestFixture.
require.Equal(t, expected, writeV2RequestFixture)
}
func TestValidateLabelsAndMetricName(t *testing.T) {
@@ -268,7 +354,7 @@ func TestConcreteSeriesIterator_HistogramSamples(t *testing.T) {
} else {
ts = int64(i)
}
histProtos[i] = HistogramToHistogramProto(ts, h)
histProtos[i] = prompb.FromIntHistogram(ts, h)
}
series := &concreteSeries{
labels: labels.FromStrings("foo", "bar"),
@@ -319,9 +405,9 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
histProtos := make([]prompb.Histogram, len(histograms))
for i, h := range histograms {
if i < 10 {
histProtos[i] = HistogramToHistogramProto(int64(i+1), h)
histProtos[i] = prompb.FromIntHistogram(int64(i+1), h)
} else {
histProtos[i] = HistogramToHistogramProto(int64(i+6), h)
histProtos[i] = prompb.FromIntHistogram(int64(i+6), h)
}
}
series := &concreteSeries{
@@ -401,7 +487,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
require.Equal(t, chunkenc.ValHistogram, it.Next())
ts, fh = it.AtFloatHistogram(nil)
require.Equal(t, int64(17), ts)
expected := HistogramProtoToFloatHistogram(HistogramToHistogramProto(int64(17), histograms[11]))
expected := prompb.FromIntHistogram(int64(17), histograms[11]).ToFloatHistogram()
require.Equal(t, expected, fh)
// Keep calling Next() until the end.
@@ -485,39 +571,8 @@ func TestMergeLabels(t *testing.T) {
}
}
func TestMetricTypeToMetricTypeProto(t *testing.T) {
tc := []struct {
desc string
input model.MetricType
expected prompb.MetricMetadata_MetricType
}{
{
desc: "with a single-word metric",
input: model.MetricTypeCounter,
expected: prompb.MetricMetadata_COUNTER,
},
{
desc: "with a two-word metric",
input: model.MetricTypeStateset,
expected: prompb.MetricMetadata_STATESET,
},
{
desc: "with an unknown metric",
input: "not-known",
expected: prompb.MetricMetadata_UNKNOWN,
},
}
for _, tt := range tc {
t.Run(tt.desc, func(t *testing.T) {
m := metricTypeToMetricTypeProto(tt.input)
require.Equal(t, tt.expected, m)
})
}
}
func TestDecodeWriteRequest(t *testing.T) {
buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
require.NoError(t, err)
actual, err := DecodeWriteRequest(bytes.NewReader(buf))
@@ -525,212 +580,18 @@ func TestDecodeWriteRequest(t *testing.T) {
require.Equal(t, writeRequestFixture, actual)
}
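The new trailing "snappy" argument selects the wire compression. A rough sketch of the equivalent round trip, assuming the gogo-generated Marshal method on prompb.WriteRequest and github.com/golang/snappy (the block format that remote write 1.0 requires):

raw, err := writeRequestFixture.Marshal() // protobuf-encode the request
require.NoError(t, err)
compressed := snappy.Encode(nil, raw) // snappy block format, not the stream format
decoded, err := DecodeWriteRequest(bytes.NewReader(compressed))
require.NoError(t, err)
require.Equal(t, writeRequestFixture, decoded)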
func TestNilHistogramProto(*testing.T) {
// This function will panic if it improperly handles nil
// values, causing the test to fail.
HistogramProtoToHistogram(prompb.Histogram{})
HistogramProtoToFloatHistogram(prompb.Histogram{})
}
func TestDecodeWriteV2Request(t *testing.T) {
buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
require.NoError(t, err)
func exampleHistogram() histogram.Histogram {
return histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 19,
Sum: 2.7,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 5},
{Offset: 1, Length: 0},
{Offset: 0, Length: 1},
},
NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
}
}
func exampleHistogramProto() prompb.Histogram {
return prompb.Histogram{
Count: &prompb.Histogram_CountInt{CountInt: 19},
Sum: 2.7,
Schema: 0,
ZeroThreshold: 0,
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
NegativeSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 5,
},
{
Offset: 1,
Length: 0,
},
{
Offset: 0,
Length: 1,
},
},
NegativeDeltas: []int64{1, 2, -2, 1, -1, 0},
PositiveSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 4,
},
{
Offset: 0,
Length: 0,
},
{
Offset: 0,
Length: 3,
},
},
PositiveDeltas: []int64{1, 2, -2, 1, -1, 0, 0},
ResetHint: prompb.Histogram_GAUGE,
Timestamp: 1337,
}
}
func TestHistogramToProtoConvert(t *testing.T) {
tests := []struct {
input histogram.CounterResetHint
expected prompb.Histogram_ResetHint
}{
{
input: histogram.UnknownCounterReset,
expected: prompb.Histogram_UNKNOWN,
},
{
input: histogram.CounterReset,
expected: prompb.Histogram_YES,
},
{
input: histogram.NotCounterReset,
expected: prompb.Histogram_NO,
},
{
input: histogram.GaugeType,
expected: prompb.Histogram_GAUGE,
},
}
for _, test := range tests {
h := exampleHistogram()
h.CounterResetHint = test.input
p := exampleHistogramProto()
p.ResetHint = test.expected
require.Equal(t, p, HistogramToHistogramProto(1337, &h))
require.Equal(t, h, *HistogramProtoToHistogram(p))
}
}
func exampleFloatHistogram() histogram.FloatHistogram {
return histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Count: 19,
Sum: 2.7,
PositiveSpans: []histogram.Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []float64{1, 2, -2, 1, -1, 0, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 5},
{Offset: 1, Length: 0},
{Offset: 0, Length: 1},
},
NegativeBuckets: []float64{1, 2, -2, 1, -1, 0},
}
}
func exampleFloatHistogramProto() prompb.Histogram {
return prompb.Histogram{
Count: &prompb.Histogram_CountFloat{CountFloat: 19},
Sum: 2.7,
Schema: 0,
ZeroThreshold: 0,
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: 0},
NegativeSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 5,
},
{
Offset: 1,
Length: 0,
},
{
Offset: 0,
Length: 1,
},
},
NegativeCounts: []float64{1, 2, -2, 1, -1, 0},
PositiveSpans: []prompb.BucketSpan{
{
Offset: 0,
Length: 4,
},
{
Offset: 0,
Length: 0,
},
{
Offset: 0,
Length: 3,
},
},
PositiveCounts: []float64{1, 2, -2, 1, -1, 0, 0},
ResetHint: prompb.Histogram_GAUGE,
Timestamp: 1337,
}
}
func TestFloatHistogramToProtoConvert(t *testing.T) {
tests := []struct {
input histogram.CounterResetHint
expected prompb.Histogram_ResetHint
}{
{
input: histogram.UnknownCounterReset,
expected: prompb.Histogram_UNKNOWN,
},
{
input: histogram.CounterReset,
expected: prompb.Histogram_YES,
},
{
input: histogram.NotCounterReset,
expected: prompb.Histogram_NO,
},
{
input: histogram.GaugeType,
expected: prompb.Histogram_GAUGE,
},
}
for _, test := range tests {
h := exampleFloatHistogram()
h.CounterResetHint = test.input
p := exampleFloatHistogramProto()
p.ResetHint = test.expected
require.Equal(t, p, FloatHistogramToHistogramProto(1337, &h))
require.Equal(t, h, *FloatHistogramProtoToFloatHistogram(p))
}
actual, err := DecodeWriteV2Request(bytes.NewReader(buf))
require.NoError(t, err)
require.Equal(t, writeV2RequestFixture, actual)
}
func TestStreamResponse(t *testing.T) {
lbs1 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
lbs2 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
lbs1 := prompb.FromLabels(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
lbs2 := prompb.FromLabels(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
chunk := prompb.Chunk{
Type: prompb.Chunk_XOR,
Data: make([]byte, 100),
@ -802,7 +663,7 @@ func (c *mockChunkSeriesSet) Next() bool {
func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
return &storage.ChunkSeriesEntry{
Lset: LabelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels),
Lset: c.chunkedSeries[c.index].ToLabels(&c.builder, nil),
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
return &mockChunkIterator{
chunks: c.chunkedSeries[c.index].Chunks,

View file

@@ -39,9 +39,3 @@ func (m *maxTimestamp) Get() float64 {
defer m.mtx.Unlock()
return m.value
}
func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
if m.Get() > 0 {
m.Gauge.Collect(c)
}
}

View file

@@ -27,7 +27,7 @@ import (
// MetadataAppender is an interface used by the Metadata Watcher to send metadata read from the scrape manager on to somewhere else.
type MetadataAppender interface {
AppendMetadata(context.Context, []scrape.MetricMetadata)
AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
}
// Watchable represents from where we fetch active targets for metadata.
@@ -146,7 +146,7 @@ func (mw *MetadataWatcher) collect() {
}
// Blocks until the metadata is sent to the remote write endpoint or hardShutdownCtx expires.
mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata)
mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
}
func (mw *MetadataWatcher) ready() bool {

View file

@@ -57,7 +57,7 @@ type writeMetadataToMock struct {
metadataAppended int
}
func (mwtm *writeMetadataToMock) AppendMetadata(_ context.Context, m []scrape.MetricMetadata) {
func (mwtm *writeMetadataToMock) AppendWatcherMetadata(_ context.Context, m []scrape.MetricMetadata) {
mwtm.metadataAppended += len(m)
}

View file

@@ -29,7 +29,6 @@ import (
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
var unitMap = map[string]string{
// Time
"d": "days",
"h": "hours",
@@ -111,7 +110,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
// Build a normalized name for the specified metric
func normalizeName(metric pmetric.Metric, namespace string) string {
// Split metric name in "tokens" (remove all non-alphanumeric)
nameTokens := strings.FieldsFunc(
metric.Name(),

View file

@@ -19,7 +19,6 @@ package prometheus
import "strings"
var wordToUCUM = map[string]string{
// Time
"days": "d",
"hours": "h",

View file

@@ -65,14 +65,14 @@ type bucketBoundsData struct {
bound float64
}
// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds
// byBucketBoundsData enables the usage of sort.Sort() with a slice of bucket bounds.
type byBucketBoundsData []bucketBoundsData
func (m byBucketBoundsData) Len() int { return len(m) }
func (m byBucketBoundsData) Less(i, j int) bool { return m[i].bound < m[j].bound }
func (m byBucketBoundsData) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
// ByLabelName enables the usage of sort.Sort() with a slice of labels
// ByLabelName enables the usage of sort.Sort() with a slice of labels.
type ByLabelName []prompb.Label
func (a ByLabelName) Len() int { return len(a) }
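Both sort.Interface implementations exist so label slices can be put in a deterministic order before they are merged; usage elsewhere in this file looks like:

sort.Stable(ByLabelName(labels))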
@@ -115,14 +115,23 @@ var seps = []byte{'\xff'}
// createAttributes creates a slice of Prometheus Labels with OTLP attributes and pairs of string values.
// Unpaired string values are ignored. String pairs overwrite OTLP labels if collisions happen and
// if logOnOverwrite is true, the overwrite is logged. Resulting label names are sanitized.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string,
// If settings.PromoteResourceAttributes is not empty, it's a set of resource attributes that should be promoted to labels.
func createAttributes(resource pcommon.Resource, attributes pcommon.Map, settings Settings,
ignoreAttrs []string, logOnOverwrite bool, extras ...string) []prompb.Label {
resourceAttrs := resource.Attributes()
serviceName, haveServiceName := resourceAttrs.Get(conventions.AttributeServiceName)
instance, haveInstanceID := resourceAttrs.Get(conventions.AttributeServiceInstanceID)
promotedAttrs := make([]prompb.Label, 0, len(settings.PromoteResourceAttributes))
for _, name := range settings.PromoteResourceAttributes {
if value, exists := resourceAttrs.Get(name); exists {
promotedAttrs = append(promotedAttrs, prompb.Label{Name: name, Value: value.AsString()})
}
}
sort.Stable(ByLabelName(promotedAttrs))
// Calculate the maximum possible number of labels we could return so we can preallocate l
maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2
maxLabelCount := attributes.Len() + len(settings.ExternalLabels) + len(promotedAttrs) + len(extras)/2
if haveServiceName {
maxLabelCount++
@@ -132,9 +141,6 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
maxLabelCount++
}
// map ensures no duplicate label name
l := make(map[string]string, maxLabelCount)
// Ensure attributes are sorted by key for consistent merging of keys which
// collide when sanitized.
labels := make([]prompb.Label, 0, maxLabelCount)
@@ -148,6 +154,8 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
})
sort.Stable(ByLabelName(labels))
// map ensures no duplicate label names.
l := make(map[string]string, maxLabelCount)
for _, label := range labels {
var finalKey = prometheustranslator.NormalizeLabel(label.Name)
if existingValue, alreadyExists := l[finalKey]; alreadyExists {
@@ -157,6 +165,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
}
}
for _, lbl := range promotedAttrs {
normalized := prometheustranslator.NormalizeLabel(lbl.Name)
if _, exists := l[normalized]; !exists {
l[normalized] = lbl.Value
}
}
// Map service.name + service.namespace to job
if haveServiceName {
val := serviceName.AsString()
@@ -169,7 +184,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
if haveInstanceID {
l[model.InstanceLabel] = instance.AsString()
}
for key, value := range externalLabels {
for key, value := range settings.ExternalLabels {
// External labels have already been sanitized
if _, alreadyExists := l[key]; alreadyExists {
// Skip external labels if they are overridden by metric attributes
@@ -182,12 +197,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
if i+1 >= len(extras) {
break
}
_, found := l[extras[i]]
name := extras[i]
_, found := l[name]
if found && logOnOverwrite {
log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
}
// internal labels should be maintained
name := extras[i]
if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
name = prometheustranslator.NormalizeLabel(name)
}
@@ -219,12 +235,19 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
return false
}
// addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series
// as classical histogram samples.
//
// Note that we can't convert to native histograms, since these have exponential buckets and don't line up
// with the user defined bucket boundaries of non-exponential OTel histograms.
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
// https://github.com/prometheus/prometheus/issues/13485.
func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice,
resource pcommon.Resource, settings Settings, baseName string) {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)
// If the sum is unset, it indicates the _sum metric point should be
// omitted
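To make the bucket mismatch described above concrete: a native histogram with schema s only has bucket upper bounds at powers of 2^(2^-s), so no schema can reproduce arbitrary explicit bounds such as 0.1, 0.5, 1. A standalone sketch of the boundary formula (not part of this package):

// nativeBucketUpperBound returns the upper bound of positive bucket i for a
// given schema: 2^(i * 2^-schema). For schema 0 that is ..., 0.5, 1, 2, 4, ...
func nativeBucketUpperBound(schema, i int32) float64 {
	return math.Pow(2, float64(i)*math.Pow(2, -float64(schema)))
}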
@@ -400,7 +423,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(dataPoints pmetric.SummaryDat
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
timestamp := convertTimeStamp(pt.Timestamp())
baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nil, false)
baseLabels := createAttributes(resource, pt.Attributes(), settings, nil, false)
// treat sum as a sample in an individual TimeSeries
sum := &prompb.Sample{
@@ -546,7 +569,8 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta
name = settings.Namespace + "_" + name
}
labels := createAttributes(resource, attributes, settings.ExternalLabels, identifyingAttrs, false, model.MetricNameLabel, name)
settings.PromoteResourceAttributes = nil
labels := createAttributes(resource, attributes, settings, identifyingAttrs, false, model.MetricNameLabel, name)
haveIdentifier := false
for _, l := range labels {
if l.Name == model.JobLabel || l.Name == model.InstanceLabel {
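A hypothetical call site to illustrate the new signature (the attribute names, external label, and metric name are made up for the example):

settings := Settings{
	ExternalLabels:            map[string]string{"cluster": "eu-1"},
	PromoteResourceAttributes: []string{"service.version", "k8s.pod.name"},
}
lbls := createAttributes(resource, pt.Attributes(), settings, nil, false,
	model.MetricNameLabel, "http_requests_total")
// Promoted resource attributes are sanitized (service.version -> service_version)
// and never override labels already set from the data point's own attributes.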

View file

@@ -0,0 +1,161 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheusremotewrite
import (
"testing"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/collector/pdata/pcommon"
"github.com/prometheus/prometheus/prompb"
)
func TestCreateAttributes(t *testing.T) {
resourceAttrs := map[string]string{
"service.name": "service name",
"service.instance.id": "service ID",
"existent-attr": "resource value",
// This one is for testing conflict with metric attribute.
"metric-attr": "resource value",
// This one is for testing conflict with auto-generated job attribute.
"job": "resource value",
// This one is for testing conflict with auto-generated instance attribute.
"instance": "resource value",
}
resource := pcommon.NewResource()
for k, v := range resourceAttrs {
resource.Attributes().PutStr(k, v)
}
attrs := pcommon.NewMap()
attrs.PutStr("__name__", "test_metric")
attrs.PutStr("metric-attr", "metric value")
testCases := []struct {
name string
promoteResourceAttributes []string
expectedLabels []prompb.Label
}{
{
name: "Successful conversion without resource attribute promotion",
promoteResourceAttributes: nil,
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "metric_attr",
Value: "metric value",
},
},
},
{
name: "Successful conversion with resource attribute promotion",
promoteResourceAttributes: []string{"non-existent-attr", "existent-attr"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "metric_attr",
Value: "metric value",
},
{
Name: "existent_attr",
Value: "resource value",
},
},
},
{
name: "Successful conversion with resource attribute promotion, conflicting resource attributes are ignored",
promoteResourceAttributes: []string{"non-existent-attr", "existent-attr", "metric-attr", "job", "instance"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "existent_attr",
Value: "resource value",
},
{
Name: "metric_attr",
Value: "metric value",
},
},
},
{
name: "Successful conversion with resource attribute promotion, attributes are only promoted once",
promoteResourceAttributes: []string{"existent-attr", "existent-attr"},
expectedLabels: []prompb.Label{
{
Name: "__name__",
Value: "test_metric",
},
{
Name: "instance",
Value: "service ID",
},
{
Name: "job",
Value: "service name",
},
{
Name: "existent_attr",
Value: "resource value",
},
{
Name: "metric_attr",
Value: "metric value",
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
settings := Settings{
PromoteResourceAttributes: tc.promoteResourceAttributes,
}
lbls := createAttributes(resource, attrs, settings, nil, false)
assert.ElementsMatch(t, lbls, tc.expectedLabels)
})
}
}

View file

@@ -30,25 +30,28 @@ import (
const defaultZeroThreshold = 1e-128
// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
// as native histogram samples.
func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
resource pcommon.Resource, settings Settings, baseName string) error {
resource pcommon.Resource, settings Settings, promName string) error {
for x := 0; x < dataPoints.Len(); x++ {
pt := dataPoints.At(x)
lbls := createAttributes(
resource,
pt.Attributes(),
settings.ExternalLabels,
nil,
true,
model.MetricNameLabel,
baseName,
)
ts, _ := c.getOrCreateTimeSeries(lbls)
histogram, err := exponentialToNativeHistogram(pt)
if err != nil {
return err
}
lbls := createAttributes(
resource,
pt.Attributes(),
settings,
nil,
true,
model.MetricNameLabel,
promName,
)
ts, _ := c.getOrCreateTimeSeries(lbls)
ts.Histograms = append(ts.Histograms, histogram)
exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
@@ -58,7 +61,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
return nil
}
// exponentialToNativeHistogram translates OTel Exponential Histogram data point
// to Prometheus Native Histogram.
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) {
scale := p.Scale()
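The scale has to be mapped onto a Prometheus schema, which is only defined for -4 through 8. A simplified sketch of the guard this function presumably needs (the real conversion merges adjacent buckets when downscaling rather than merely clamping):

if scale < -4 {
	return prompb.Histogram{}, fmt.Errorf(
		"cannot convert exponential histogram, scale %d is below the smallest supported schema (-4)", scale)
}
if scale > 8 {
	// Schemas above 8 cannot be represented; assume bucket pairs are merged
	// (halving resolution) until the scale fits.
	scale = 8
}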

Some files were not shown because too many files have changed in this diff.