Mirror of https://github.com/prometheus/prometheus.git
Commit c0d67fd845: Merge branch 'main' into 3.0-main-sync-24-07-09
@@ -194,6 +194,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
        case "extra-scrape-metrics":
            c.scrape.ExtraMetrics = true
            level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
+       case "metadata-wal-records":
+           c.scrape.AppendMetadata = true
+           level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
        case "new-service-discovery-manager":
            c.enableNewSDManager = true
            level.Info(logger).Log("msg", "Experimental service discovery manager")
@@ -320,9 +323,15 @@ func main() {
    a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
        Default("false").BoolVar(&cfg.web.EnableAdminAPI)

+   // TODO(bwplotka): Consider allowing those remote receive flags to be changed in config.
+   // See https://github.com/prometheus/prometheus/issues/14410
    a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
        Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)

+   supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
+   a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
+       Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
+
    a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
        Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)

@@ -644,7 +653,7 @@ func main() {
    var (
        localStorage  = &readyStorage{stats: tsdb.NewDBStats()}
        scraper       = &readyScrapeManager{}
-       remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
+       remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
        fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
    )

@@ -1765,3 +1774,39 @@ type discoveryManager interface {
    Run() error
    SyncCh() <-chan map[string][]*targetgroup.Group
}
+
+// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
+type rwProtoMsgFlagParser struct {
+   msgs *[]config.RemoteWriteProtoMsg
+}
+
+func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value {
+   return &rwProtoMsgFlagParser{msgs: msgs}
+}
+
+// IsCumulative is used by kingpin to tell if it's an array or not.
+func (p *rwProtoMsgFlagParser) IsCumulative() bool {
+   return true
+}
+
+func (p *rwProtoMsgFlagParser) String() string {
+   ss := make([]string, 0, len(*p.msgs))
+   for _, t := range *p.msgs {
+       ss = append(ss, string(t))
+   }
+   return strings.Join(ss, ",")
+}
+
+func (p *rwProtoMsgFlagParser) Set(opt string) error {
+   t := config.RemoteWriteProtoMsg(opt)
+   if err := t.Validate(); err != nil {
+       return err
+   }
+   for _, prev := range *p.msgs {
+       if prev == t {
+           return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
+       }
+   }
+   *p.msgs = append(*p.msgs, t)
+   return nil
+}
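The table-driven test further down exercises this parser, but as a quick illustration of the `kingpin.Value` pattern it relies on, here is a minimal, self-contained sketch with a made-up flag and values. Nothing in it is Prometheus code; it only mirrors the `Set`/`String`/`IsCumulative` contract used above.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/alecthomas/kingpin/v2"
)

// colorFlag is a toy kingpin.Value: Set validates each occurrence,
// String reports the accumulated values, IsCumulative marks it repeatable.
type colorFlag struct{ vals *[]string }

func (c *colorFlag) IsCumulative() bool { return true }

func (c *colorFlag) String() string { return strings.Join(*c.vals, ",") }

func (c *colorFlag) Set(v string) error {
	if v != "red" && v != "blue" {
		return fmt.Errorf("unknown color %q, supported: red, blue", v)
	}
	*c.vals = append(*c.vals, v)
	return nil
}

func main() {
	app := kingpin.New("demo", "")
	var colors []string
	app.Flag("color", "accepted colors").Default("red", "blue").SetValue(&colorFlag{vals: &colors})

	if _, err := app.Parse([]string{"--color", "blue"}); err != nil {
		panic(err)
	}
	fmt.Println(colors) // [blue]; the defaults would only apply if --color were omitted
}
```

With defaults declared via `Default(...)`, kingpin calls `Set` once per default only when the flag is omitted, which is the behaviour the test below asserts for the real parser.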
@@ -30,11 +30,13 @@ import (
    "testing"
    "time"

+   "github.com/alecthomas/kingpin/v2"
    "github.com/go-kit/log"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/model"
    "github.com/stretchr/testify/require"

    "github.com/prometheus/prometheus/config"
    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/notifier"
    "github.com/prometheus/prometheus/rules"
@@ -499,3 +501,65 @@ func TestDocumentation(t *testing.T) {

    require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
+
+func TestRwProtoMsgFlagParser(t *testing.T) {
+   defaultOpts := config.RemoteWriteProtoMsgs{
+       config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
+   }
+
+   for _, tcase := range []struct {
+       args        []string
+       expected    []config.RemoteWriteProtoMsg
+       expectedErr error
+   }{
+       {
+           args:     nil,
+           expected: defaultOpts,
+       },
+       {
+           args:        []string{"--test-proto-msgs", "test"},
+           expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"),
+       },
+       {
+           args:     []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"},
+           expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2},
+       },
+       {
+           args: []string{
+               "--test-proto-msgs", "io.prometheus.write.v2.Request",
+               "--test-proto-msgs", "io.prometheus.write.v2.Request",
+           },
+           expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"),
+       },
+       {
+           args: []string{
+               "--test-proto-msgs", "io.prometheus.write.v2.Request",
+               "--test-proto-msgs", "prometheus.WriteRequest",
+           },
+           expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1},
+       },
+       {
+           args: []string{
+               "--test-proto-msgs", "io.prometheus.write.v2.Request",
+               "--test-proto-msgs", "prometheus.WriteRequest",
+               "--test-proto-msgs", "io.prometheus.write.v2.Request",
+           },
+           expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"),
+       },
+   } {
+       t.Run(strings.Join(tcase.args, ","), func(t *testing.T) {
+           a := kingpin.New("test", "")
+           var opt []config.RemoteWriteProtoMsg
+           a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt))
+
+           _, err := a.Parse(tcase.args)
+           if tcase.expectedErr != nil {
+               require.Error(t, err)
+               require.Equal(t, tcase.expectedErr, err)
+           } else {
+               require.NoError(t, err)
+               require.Equal(t, tcase.expected, opt)
+           }
+       })
+   }
+}
@@ -180,6 +180,7 @@ var (
    // DefaultRemoteWriteConfig is the default remote write configuration.
    DefaultRemoteWriteConfig = RemoteWriteConfig{
        RemoteTimeout:    model.Duration(30 * time.Second),
+       ProtobufMessage:  RemoteWriteProtoMsgV1,
        QueueConfig:      DefaultQueueConfig,
        MetadataConfig:   DefaultMetadataConfig,
        HTTPClientConfig: config.DefaultHTTPClientConfig,
@@ -279,7 +280,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {

    jobNames := map[string]string{}
    for i, scfg := range c.ScrapeConfigs {
-       // We do these checks for library users that would not call Validate in
+       // We do these checks for library users that would not call validate in
        // Unmarshal.
        if err := scfg.Validate(c.GlobalConfig); err != nil {
            return nil, err
@@ -1055,6 +1056,49 @@ func CheckTargetAddress(address model.LabelValue) error {
    return nil
}

+// RemoteWriteProtoMsg represents the known protobuf message for the remote write
+// 1.0 and 2.0 specs.
+type RemoteWriteProtoMsg string
+
+// Validate returns error if the given reference for the protobuf message is not supported.
+func (s RemoteWriteProtoMsg) Validate() error {
+   switch s {
+   case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
+       return nil
+   default:
+       return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
+   }
+}
+
+type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
+
+func (m RemoteWriteProtoMsgs) Strings() []string {
+   ret := make([]string, 0, len(m))
+   for _, typ := range m {
+       ret = append(ret, string(typ))
+   }
+   return ret
+}
+
+func (m RemoteWriteProtoMsgs) String() string {
+   return strings.Join(m.Strings(), ", ")
+}
+
+var (
+   // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
+   // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/.
+   //
+   // NOTE: This string is used for both HTTP header values and config value, so don't change
+   // this reference.
+   RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
+   // RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
+   // message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+   //
+   // NOTE: This string is used for both HTTP header values and config value, so don't change
+   // this reference.
+   RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
+)

// RemoteWriteConfig is the configuration for writing to remote storage.
type RemoteWriteConfig struct {
    URL *config.URL `yaml:"url"`
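As the NOTE comments say, these strings are shared between the configuration surface and HTTP content negotiation. A hedged sketch of how a sender-side caller might validate a configured value and derive the matching `Content-Type` header; the `application/x-protobuf;proto=...` shape is taken from the example receiver later in this diff, and the snippet itself is illustrative rather than code from the commit.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Value as it would arrive from a config file or flag.
	msg := config.RemoteWriteProtoMsg("io.prometheus.write.v2.Request")
	if err := msg.Validate(); err != nil {
		log.Fatalf("bad protobuf_message: %v", err)
	}

	// The example receiver in this diff switches on exactly this media type string.
	contentType := fmt.Sprintf("application/x-protobuf;proto=%s", msg)
	fmt.Println(contentType) // application/x-protobuf;proto=io.prometheus.write.v2.Request
}
```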
@@ -1064,6 +1108,9 @@ type RemoteWriteConfig struct {
    Name                 string `yaml:"name,omitempty"`
    SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
    SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
+   // ProtobufMessage specifies the protobuf message to use against the remote
+   // receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
+   ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`

    // We cannot do proper Go type embedding below as the parser will then parse
    // values arbitrarily into the overflow maps of further-down types.
@@ -1098,6 +1145,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
        return err
    }

+   if err := c.ProtobufMessage.Validate(); err != nil {
+       return fmt.Errorf("invalid protobuf_message value: %w", err)
+   }
+
    // The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
    // We cannot make it a pointer as the parser panics for inlined pointer structs.
    // Thus we just do its validation here.
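The `ProtobufMessage` check above follows the usual pattern in this package: validate inside `UnmarshalYAML` so that a bad value fails at config-load time rather than at send time. A standalone sketch of that pattern with a toy type (the `exampleRW` name and fields are illustrative only), assuming `gopkg.in/yaml.v2`:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type exampleRW struct {
	URL             string `yaml:"url"`
	ProtobufMessage string `yaml:"protobuf_message,omitempty"`
}

// UnmarshalYAML decodes into a shadow type to avoid recursion, then validates.
func (c *exampleRW) UnmarshalYAML(unmarshal func(interface{}) error) error {
	type plain exampleRW
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}
	switch c.ProtobufMessage {
	case "", "prometheus.WriteRequest", "io.prometheus.write.v2.Request":
		return nil
	default:
		return fmt.Errorf("invalid protobuf_message value: unknown remote write protobuf message %s", c.ProtobufMessage)
	}
}

func main() {
	var c exampleRW
	// Mirrors remote_write_wrong_msg.bad.yml: the typo'd message is rejected at load time.
	err := yaml.Unmarshal([]byte("url: http://remote2/push\nprotobuf_message: io.prometheus.writet.v2.Request\n"), &c)
	fmt.Println(err)
}
```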
@@ -108,9 +108,10 @@ var expectedConf = &Config{

    RemoteWriteConfigs: []*RemoteWriteConfig{
        {
-           URL:           mustParseURL("http://remote1/push"),
-           RemoteTimeout: model.Duration(30 * time.Second),
-           Name:          "drop_expensive",
+           URL:             mustParseURL("http://remote1/push"),
+           ProtobufMessage: RemoteWriteProtoMsgV1,
+           RemoteTimeout:   model.Duration(30 * time.Second),
+           Name:            "drop_expensive",
            WriteRelabelConfigs: []*relabel.Config{
                {
                    SourceLabels: model.LabelNames{"__name__"},
@@ -137,11 +138,12 @@ var expectedConf = &Config{
            },
        },
        {
-           URL:            mustParseURL("http://remote2/push"),
-           RemoteTimeout:  model.Duration(30 * time.Second),
-           QueueConfig:    DefaultQueueConfig,
-           MetadataConfig: DefaultMetadataConfig,
-           Name:           "rw_tls",
+           URL:             mustParseURL("http://remote2/push"),
+           ProtobufMessage: RemoteWriteProtoMsgV2,
+           RemoteTimeout:   model.Duration(30 * time.Second),
+           QueueConfig:     DefaultQueueConfig,
+           MetadataConfig:  DefaultMetadataConfig,
+           Name:            "rw_tls",
            HTTPClientConfig: config.HTTPClientConfig{
                TLSConfig: config.TLSConfig{
                    CertFile: filepath.FromSlash("testdata/valid_cert_file"),
@@ -1800,6 +1802,10 @@ var expectedErrors = []struct {
        filename: "remote_write_authorization_header.bad.yml",
        errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
    },
+   {
+       filename: "remote_write_wrong_msg.bad.yml",
+       errMsg:   `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`,
+   },
    {
        filename: "remote_write_url_missing.bad.yml",
        errMsg:   `url for remote_write is empty`,
config/testdata/conf.good.yml (vendored, 1 line added)
@@ -37,6 +37,7 @@ remote_write:
      key_file: valid_key_file

  - url: http://remote2/push
+    protobuf_message: io.prometheus.write.v2.Request
    name: rw_tls
    tls_config:
      cert_file: valid_cert_file
config/testdata/remote_write_wrong_msg.bad.yml (vendored, new file)
@@ -0,0 +1,3 @@
+remote_write:
+  - url: localhost:9090
+    protobuf_message: io.prometheus.writet.v2.Request # typo in 'write"
@@ -26,6 +26,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.enable-lifecycle</code> | Enable shutdown and reload via HTTP request. | `false` |
| <code class="text-nowrap">--web.enable-admin-api</code> | Enable API endpoints for admin control actions. | `false` |
| <code class="text-nowrap">--web.enable-remote-write-receiver</code> | Enable API endpoint accepting remote write requests. | `false` |
+| <code class="text-nowrap">--web.remote-write-receiver.accepted-protobuf-messages</code> | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` |
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
@@ -3575,6 +3575,17 @@ this functionality.
# The URL of the endpoint to send samples to.
url: <string>

+# protobuf message to use when writing to the remote write endpoint.
+#
+# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which
+# will be deprecated eventually.
+# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former,
+# by improving efficiency and sending metadata, created timestamp and native histograms by default.
+#
+# Before changing this value, consult with your remote storage provider (or test) what message it supports.
+# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request
+[ protobuf_message: <prometheus.WriteRequest | io.prometheus.write.v2.Request> | default = prometheus.WriteRequest ]
+
# Timeout for requests to the remote write endpoint.
[ remote_timeout: <duration> | default = 30s ]

@@ -3596,6 +3607,7 @@ write_relabel_configs:
[ send_exemplars: <boolean> | default = false ]

# Enables sending of native histograms, also known as sparse histograms, over remote write.
+# For the `io.prometheus.write.v2.Request` message, this option is noop (always true).
[ send_native_histograms: <boolean> | default = false ]

# Sets the `Authorization` header on every remote write request with the
@@ -3609,7 +3621,7 @@ basic_auth:
# Optional `Authorization` header configuration.
authorization:
  # Sets the authentication type.
-  [ type: <string> | default: Bearer ]
+  [ type: <string> | default = Bearer ]
  # Sets the credentials. It is mutually exclusive with
  # `credentials_file`.
  [ credentials: <secret> ]
@@ -3673,7 +3685,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, https_proxy, HTTPs_PROXY, https_proxy, and no_proxy)
-[ proxy_from_environment: <boolean> | default: false ]
+[ proxy_from_environment: <boolean> | default = false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
  [ <string>: [<secret>, ...] ] ]
@@ -3682,7 +3694,7 @@ tls_config:
[ follow_redirects: <boolean> | default = true ]

# Whether to enable HTTP2.
-[ enable_http2: <boolean> | default: true ]
+[ enable_http2: <boolean> | default = true ]

# Configures the queue used to write to remote storage.
queue_config:
@@ -3712,7 +3724,10 @@ queue_config:
# which means that all samples are sent.
[ sample_age_limit: <duration> | default = 0s ]

-# Configures the sending of series metadata to remote storage.
+# Configures the sending of series metadata to remote storage
+# if the `prometheus.WriteRequest` message was chosen. When
+# `io.prometheus.write.v2.Request` is used, metadata is always sent.
+#
# Metadata configuration is subject to change at any point
# or be removed in future releases.
metadata_config:
@@ -224,3 +224,13 @@ When the `concurrent-rule-eval` feature flag is enabled, rules without any depen
This has the potential to improve rule group evaluation latency and resource utilization at the expense of adding more concurrent query load.

The number of concurrent rule evaluations can be configured with `--rules.max-concurrent-rule-evals`, which is set to `4` by default.
+
+## Metadata WAL Records
+
+`--enable-feature=metadata-wal-records`
+
+When enabled, Prometheus will store metadata in-memory and keep track of
+metadata changes as WAL records on a per-series basis.
+
+This must be used if
+you are also using remote write 2.0 as it will only gather metadata from the WAL.
@@ -25,8 +25,10 @@ Other non-`2xx` codes may be returned for errors occurring before the API
endpoint is reached.

An array of warnings may be returned if there are errors that do
-not inhibit the request execution. All of the data that was successfully
-collected will be returned in the data field.
+not inhibit the request execution. An additional array of info-level
+annotations may be returned for potential query issues that may or may
+not be false positives. All of the data that was successfully collected
+will be returned in the data field.

The JSON response envelope format is as follows:
@@ -40,9 +42,11 @@ The JSON response envelope format is as follows:
  "errorType": "<string>",
  "error": "<string>",

-  // Only if there were warnings while executing the request.
+  // Only set if there were warnings while executing the request.
  // There will still be data in the data field.
-  "warnings": ["<string>"]
+  "warnings": ["<string>"],
+  // Only set if there were info-level annotations while executing the request.
+  "infos": ["<string>"]
}
```

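For API clients the new `infos` field is additive: like `warnings`, it never aborts the request and `data` is still populated. A hedged sketch of decoding the envelope fields listed above; the struct and its name are mine, only the JSON keys come from the documentation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// apiEnvelope mirrors the documented JSON response envelope.
type apiEnvelope struct {
	Status    string          `json:"status"`
	Data      json.RawMessage `json:"data,omitempty"`
	ErrorType string          `json:"errorType,omitempty"`
	Error     string          `json:"error,omitempty"`
	Warnings  []string        `json:"warnings,omitempty"`
	Infos     []string        `json:"infos,omitempty"`
}

func main() {
	body := []byte(`{"status":"success","data":{},"warnings":["w"],"infos":["i"]}`)
	var env apiEnvelope
	if err := json.Unmarshal(body, &env); err != nil {
		panic(err)
	}
	// Warnings and infos do not abort the request; data is still present.
	fmt.Println(env.Status, env.Warnings, env.Infos)
}
```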
@@ -452,7 +456,7 @@ raw numbers.

The keys `"histogram"` and `"histograms"` only show up if the experimental
native histograms are present in the response. Their placeholder `<histogram>`
is explained in detail in its own section below.

### Range vectors

@@ -470,7 +474,7 @@ Range vectors are returned as result type `matrix`. The corresponding
]
```

Each series could have the `"values"` key, or the `"histograms"` key, or both.
For a given timestamp, there will only be one sample of either float or histogram type.

Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort)
@@ -1309,7 +1313,7 @@ endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview).

## OTLP Receiver

Prometheus can be configured as a receiver for the OTLP Metrics protocol. This
is not considered an efficient way of ingesting samples. Use it
with caution for specific low-volume use cases. It is not suitable for
replacing the ingestion via scraping.
@@ -95,3 +95,13 @@ Assuming this metric contains one time series per running instance, you could
count the number of running instances per application like this:

    count by (app) (instance_cpu_time_ns)
+
+If we are exploring some metrics for their labels, to e.g. be able to aggregate
+over some of them, we could use the following:
+
+    limitk(10, app_foo_metric_bar)
+
+Alternatively, if we wanted the returned timeseries to be more evenly sampled,
+we could use the following to get approximately 10% of them:
+
+    limit_ratio(0.1, app_foo_metric_bar)
@@ -230,6 +230,8 @@ vector of fewer elements with aggregated values:
* `bottomk` (smallest k elements by sample value)
* `topk` (largest k elements by sample value)
* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
+* `limitk` (sample n elements)
+* `limit_ratio` (sample elements with approximately 𝑟 ratio if `𝑟 > 0`, and the complement of such samples if `𝑟 = -(1.0 - 𝑟)`)

These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@@ -249,8 +251,8 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.

-`parameter` is only required for `count_values`, `quantile`, `topk` and
-`bottomk`.
+`parameter` is only required for `count_values`, `quantile`, `topk`,
+`bottomk`, `limitk` and `limit_ratio`.

`count_values` outputs one time series per unique sample value. Each series has
an additional label. The name of that label is given by the aggregation
@@ -261,11 +263,16 @@ time series is the number of times that sample value was present.
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector.

+`limitk` and `limit_ratio` also return a subset of the input samples,
+including the original labels in the result vector, these are experimental
+operators that must be enabled with `--enable-feature=promql-experimental-functions`.
+
`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.

Example:

If the metric `http_requests_total` had time series that fan out by
@@ -291,6 +298,33 @@ To get the 5 largest HTTP requests counts across all instances we could write:

    topk(5, http_requests_total)

+To sample 10 timeseries, for example to inspect labels and their values, we
+could write:
+
+    limitk(10, http_requests_total)
+
+To deterministically sample approximately 10% of timeseries we could write:
+
+    limit_ratio(0.1, http_requests_total)
+
+Given that `limit_ratio()` implements a deterministic sampling algorithm (based
+on labels' hash), you can get the _complement_ of the above samples, i.e.
+approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)`
+with:
+
+    limit_ratio(-0.9, http_requests_total)
+
+You can also use this feature to e.g. verify that `avg()` is a representative
+aggregation for your samples' values, by checking that the difference between
+averaging two samples' subsets is "small" when compared to the standard
+deviation.
+
+    abs(
+      avg(limit_ratio(0.5, http_requests_total))
+      -
+      avg(limit_ratio(-0.5, http_requests_total))
+    ) <= bool stddev(http_requests_total)
+
## Binary operator precedence

The following list shows the precedence of binary operators in Prometheus, from
@@ -7,6 +7,7 @@ To use it:

```
go build
+
./example_write_adapter
```

@@ -15,10 +16,19 @@ go build
```yaml
remote_write:
  - url: "http://localhost:1234/receive"
+    protobuf_message: "io.prometheus.write.v2.Request"
```

-Then start Prometheus:
+or for deprecated Remote Write 1.0 message:
+
+```yaml
+remote_write:
+  - url: "http://localhost:1234/receive"
+    protobuf_message: "prometheus.WriteRequest"
+```
+
+Then start Prometheus (in separate terminal):

```
-./prometheus
+./prometheus --enable-feature=metadata-wal-records
```

@@ -18,44 +18,103 @@ import (
    "log"
    "net/http"

    "github.com/prometheus/common/model"

    "github.com/prometheus/prometheus/model/labels"
    "github.com/prometheus/prometheus/prompb"
    writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
    "github.com/prometheus/prometheus/storage/remote"
)

func main() {
    http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
        req, err := remote.DecodeWriteRequest(r.Body)
        if err != nil {
            http.Error(w, err.Error(), http.StatusBadRequest)
        enc := r.Header.Get("Content-Encoding")
        if enc == "" {
            http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
            return
        }
        if enc != "snappy" {
            http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
            return
        }

        for _, ts := range req.Timeseries {
            m := make(model.Metric, len(ts.Labels))
            for _, l := range ts.Labels {
                m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
            }
            fmt.Println(m)
        contentType := r.Header.Get("Content-Type")
        if contentType == "" {
            http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
        }

            for _, s := range ts.Samples {
                fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
            }
        defer func() { _ = r.Body.Close() }()

            for _, e := range ts.Exemplars {
                m := make(model.Metric, len(e.Labels))
                for _, l := range e.Labels {
                    m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
                }
                fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
        // Very simplistic content parsing, see
        // storage/remote/write_handler.go#WriteHandler.ServeHTTP for production example.
        switch contentType {
        case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest":
            req, err := remote.DecodeWriteRequest(r.Body)
            if err != nil {
                http.Error(w, err.Error(), http.StatusBadRequest)
                return
            }

            for _, hp := range ts.Histograms {
                h := remote.HistogramProtoToHistogram(hp)
                fmt.Printf("\tHistogram: %s\n", h.String())
            printV1(req)
        case "application/x-protobuf;proto=io.prometheus.write.v2.Request":
            req, err := remote.DecodeWriteV2Request(r.Body)
            if err != nil {
                http.Error(w, err.Error(), http.StatusBadRequest)
                return
            }
            printV2(req)
        default:
            msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
            fmt.Println(msg)
            http.Error(w, msg, http.StatusBadRequest)
        }
    })

    log.Fatal(http.ListenAndServe(":1234", nil))
}

func printV1(req *prompb.WriteRequest) {
    b := labels.NewScratchBuilder(0)
    for _, ts := range req.Timeseries {
        fmt.Println(ts.ToLabels(&b, nil))

        for _, s := range ts.Samples {
            fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
        }
        for _, ep := range ts.Exemplars {
            e := ep.ToExemplar(&b, nil)
            fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
        }
        for _, hp := range ts.Histograms {
            if hp.IsFloatHistogram() {
                h := hp.ToFloatHistogram()
                fmt.Printf("\tHistogram: %s\n", h.String())
                continue
            }
            h := hp.ToIntHistogram()
            fmt.Printf("\tHistogram: %s\n", h.String())
        }
    }
}

func printV2(req *writev2.Request) {
    b := labels.NewScratchBuilder(0)
    for _, ts := range req.Timeseries {
        l := ts.ToLabels(&b, req.Symbols)
        m := ts.ToMetadata(req.Symbols)
        fmt.Println(l, m)

        for _, s := range ts.Samples {
            fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
        }
        for _, ep := range ts.Exemplars {
            e := ep.ToExemplar(&b, req.Symbols)
            fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
        }
        for _, hp := range ts.Histograms {
            if hp.IsFloatHistogram() {
                h := hp.ToFloatHistogram()
                fmt.Printf("\tHistogram: %s\n", h.String())
                continue
            }
            h := hp.ToIntHistogram()
            fmt.Printf("\tHistogram: %s\n", h.String())
        }
    }
}
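To poke at the receiver above by hand, a sender has to snappy-compress a protobuf payload and set the `Content-Type` and `Content-Encoding` headers the handler checks. A hedged sketch of a minimal `prometheus.WriteRequest` (v1) sender follows; the metric name and sample values are made up, and only the headers, URL and decode path come from the example itself.

```go
package main

import (
	"bytes"
	"log"
	"net/http"

	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
)

func main() {
	// A single series with one sample, purely for illustration.
	req := &prompb.WriteRequest{
		Timeseries: []prompb.TimeSeries{{
			Labels:  []prompb.Label{{Name: "__name__", Value: "demo_metric"}},
			Samples: []prompb.Sample{{Value: 1, Timestamp: 1720000000000}},
		}},
	}
	raw, err := req.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	body := snappy.Encode(nil, raw)

	httpReq, err := http.NewRequest(http.MethodPost, "http://localhost:1234/receive", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	// These are the values the example receiver's switch statement accepts.
	httpReq.Header.Set("Content-Type", "application/x-protobuf;proto=prometheus.WriteRequest")
	httpReq.Header.Set("Content-Encoding", "snappy")

	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
```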
@@ -9,7 +9,7 @@ require (
    github.com/golang/snappy v0.0.4
    github.com/influxdata/influxdb v1.11.5
    github.com/prometheus/client_golang v1.19.1
-   github.com/prometheus/common v0.54.0
+   github.com/prometheus/common v0.55.0
    github.com/prometheus/prometheus v0.52.1
    github.com/stretchr/testify v1.9.0
)
@@ -17,10 +17,10 @@ require (
require (
    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
-   github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
+   github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
    github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
-   github.com/aws/aws-sdk-go v1.51.25 // indirect
+   github.com/aws/aws-sdk-go v1.53.16 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -31,8 +31,7 @@ require (
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
    github.com/google/uuid v1.6.0 // indirect
-   github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
-   github.com/hashicorp/go-version v1.6.0 // indirect
+   github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/jpillora/backoff v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
@@ -41,31 +40,31 @@ require (
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
+   github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
    github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/prometheus/client_model v0.6.1 // indirect
    github.com/prometheus/common/sigv4 v0.1.0 // indirect
-   github.com/prometheus/procfs v0.12.0 // indirect
+   github.com/prometheus/procfs v0.15.1 // indirect
    github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
-   go.opentelemetry.io/collector/featuregate v1.5.0 // indirect
-   go.opentelemetry.io/collector/pdata v1.5.0 // indirect
-   go.opentelemetry.io/collector/semconv v0.98.0 // indirect
-   go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
-   go.opentelemetry.io/otel v1.25.0 // indirect
-   go.opentelemetry.io/otel/metric v1.25.0 // indirect
-   go.opentelemetry.io/otel/trace v1.25.0 // indirect
+   go.opentelemetry.io/collector/pdata v1.8.0 // indirect
+   go.opentelemetry.io/collector/semconv v0.101.0 // indirect
+   go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
+   go.opentelemetry.io/otel v1.27.0 // indirect
+   go.opentelemetry.io/otel/metric v1.27.0 // indirect
+   go.opentelemetry.io/otel/trace v1.27.0 // indirect
    go.uber.org/atomic v1.11.0 // indirect
    go.uber.org/multierr v1.11.0 // indirect
-   golang.org/x/crypto v0.22.0 // indirect
-   golang.org/x/net v0.24.0 // indirect
-   golang.org/x/oauth2 v0.19.0 // indirect
-   golang.org/x/sys v0.19.0 // indirect
-   golang.org/x/text v0.14.0 // indirect
+   golang.org/x/crypto v0.24.0 // indirect
+   golang.org/x/net v0.26.0 // indirect
+   golang.org/x/oauth2 v0.21.0 // indirect
+   golang.org/x/sys v0.21.0 // indirect
+   golang.org/x/text v0.16.0 // indirect
    golang.org/x/time v0.5.0 // indirect
-   google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
-   google.golang.org/grpc v1.63.2 // indirect
-   google.golang.org/protobuf v1.34.0 // indirect
+   google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
+   google.golang.org/grpc v1.64.0 // indirect
+   google.golang.org/protobuf v1.34.2 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/apimachinery v0.29.3 // indirect
@@ -81,4 +80,10 @@ exclude (
    cloud.google.com/go v0.34.0
    cloud.google.com/go v0.65.0
    cloud.google.com/go v0.82.0
+
+   // Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules.
+   google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
)
+
+// TODO(bwplotka): Move to main branch commit or perhaps released version.
+replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c
@ -2,10 +2,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqb
|
|||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
|
@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
|
|||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.51.25 h1:DjTT8mtmsachhV6yrXR8+yhnG6120dazr720nopRsls=
|
||||
github.com/aws/aws-sdk-go v1.51.25/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
|
||||
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
|
@ -37,8 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
|
|||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
|
||||
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
|
||||
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
|
||||
github.com/digitalocean/godo v1.113.0 h1:CLtCxlP4wDAjKIQ+Hshht/UNbgAp8/J/XBH1ZtDCF9Y=
|
||||
github.com/digitalocean/godo v1.113.0/go.mod h1:Z2mTP848Vi3IXXl5YbPekUgr4j4tOePomA+OE1Ag98w=
|
||||
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
|
||||
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
|
||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
|
||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/docker v26.0.1+incompatible h1:t39Hm6lpXuXtgkF0dm1t9a5HkbUfdGy6XbWexmGr+hA=
|
||||
github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
|
||||
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
|
@ -68,8 +68,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
|
|||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
|
||||
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
|
||||
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
|
@ -95,8 +95,8 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
|
|||
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
|
||||
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
|
||||
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
|
||||
github.com/go-resty/resty/v2 v2.12.0 h1:rsVL8P90LFvkUYq/V5BTVe203WfRIU4gvcf+yfzJzGA=
|
||||
github.com/go-resty/resty/v2 v2.12.0/go.mod h1:o0yGPrkS3lOe1+eFajk6kBW8ScXzwU3hD69/gt2yB/0=
|
||||
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
|
||||
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
|
||||
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
|
@ -135,40 +135,38 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
|||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gophercloud/gophercloud v1.11.0 h1:ls0O747DIq1D8SUHc7r2vI8BFbMLeLFuENaAIfEx7OM=
|
||||
github.com/gophercloud/gophercloud v1.11.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
|
||||
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
|
||||
github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
|
||||
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
|
||||
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
|
||||
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
|
||||
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
|
||||
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
|
||||
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
|
||||
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7 h1:pjE59CS2C9Bg+Xby0ROrnZSSBWtKwx3Sf9gqsrvIFSA=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240418183417-ea5f2f6748c7/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
|
||||
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.7.2 h1:UlE7n1GQZacCfyjv9tDVUN7HZfOXErPIfM/M039u9A0=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.7.2/go.mod h1:49tIV+pXRJTUC7fbFZ03s45LKqSQdOPP5y91eOnJo/k=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
|
||||
|
@ -208,14 +206,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
|
||||
github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
|
||||
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
|
||||
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
|
||||
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
|
||||
|
@ -243,8 +241,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
|||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
|
||||
github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
|
||||
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
|
||||
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -269,22 +267,22 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
|
||||
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
|
||||
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4=
|
||||
github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
|
||||
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26 h1:F+GIVtGqCFxPxO46ujf8cEOP574MBoRm3gNbPXECbxs=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.26/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
|
@ -306,20 +304,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
|
|||
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
|
||||
go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
|
||||
go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
|
||||
go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
|
||||
go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
|
||||
go.opentelemetry.io/collector/semconv v0.98.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
|
||||
go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
|
||||
go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
|
||||
go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
|
||||
go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
|
||||
go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
|
||||
go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
|
||||
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
|
||||
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
|
||||
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
|
||||
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
|
||||
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
|
||||
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
|
||||
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
|
||||
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
|
||||
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
|
||||
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -330,14 +326,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
|
|||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -351,12 +347,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
|
||||
golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -380,38 +376,37 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
|
||||
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
|
||||
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
|
||||
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -419,8 +414,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
|||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
|
||||
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -28,7 +28,7 @@ const (
|
|||
maxSetMatches = 256
|
||||
|
||||
// The minimum number of alternate values a regex should have to trigger
|
||||
// the optimization done by optimizeEqualStringMatchers() and so use a map
|
||||
// the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map
|
||||
// to match values instead of iterating over a list. This value has
|
||||
// been computed running BenchmarkOptimizeEqualStringMatchers.
|
||||
minEqualMultiStringMatcherMapThreshold = 16
@ -337,7 +337,7 @@ func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)
|
||||
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0)
|
||||
|
||||
for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
|
||||
// Split the string into the next literal and the remainder
@ -412,7 +412,7 @@ func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
|
|||
clearBeginEndText(re)
|
||||
|
||||
m := stringMatcherFromRegexpInternal(re)
|
||||
m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
|
||||
m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
|
||||
|
||||
return m
|
||||
}
@ -549,11 +549,7 @@ func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
|
|||
|
||||
// Right matcher with 1 fixed set match.
|
||||
case left == nil && len(matches) == 1:
|
||||
return &literalPrefixStringMatcher{
|
||||
prefix: matches[0],
|
||||
prefixCaseSensitive: matchesCaseSensitive,
|
||||
right: right,
|
||||
}
|
||||
return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right)
|
||||
|
||||
// Left matcher with 1 fixed set match.
|
||||
case right == nil && len(matches) == 1:
@ -631,21 +627,47 @@ func (m *containsStringMatcher) Matches(s string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
|
||||
type literalPrefixStringMatcher struct {
|
||||
prefix string
|
||||
prefixCaseSensitive bool
|
||||
func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher {
|
||||
if prefixCaseSensitive {
|
||||
return &literalPrefixSensitiveStringMatcher{
|
||||
prefix: prefix,
|
||||
right: right,
|
||||
}
|
||||
}
|
||||
|
||||
return &literalPrefixInsensitiveStringMatcher{
|
||||
prefix: prefix,
|
||||
right: right,
|
||||
}
|
||||
}
|
||||
|
||||
// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher.
|
||||
type literalPrefixSensitiveStringMatcher struct {
|
||||
prefix string
|
||||
|
||||
// The matcher that must match the right side. Can be nil.
|
||||
right StringMatcher
|
||||
}
|
||||
|
||||
func (m *literalPrefixStringMatcher) Matches(s string) bool {
|
||||
// Ensure the prefix matches.
|
||||
if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
|
||||
func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool {
|
||||
if !strings.HasPrefix(s, m.prefix) {
|
||||
return false
|
||||
}
|
||||
if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
|
||||
|
||||
// Ensure the right side matches.
|
||||
return m.right.Matches(s[len(m.prefix):])
|
||||
}
|
||||
|
||||
// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher.
|
||||
type literalPrefixInsensitiveStringMatcher struct {
|
||||
prefix string
|
||||
|
||||
// The matcher that must match the right side. Can be nil.
|
||||
right StringMatcher
|
||||
}
|
||||
|
||||
func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool {
|
||||
if !hasPrefixCaseInsensitive(s, m.prefix) {
|
||||
return false
|
||||
}
@ -710,17 +732,20 @@ func (m *equalStringMatcher) Matches(s string) bool {
|
|||
type multiStringMatcherBuilder interface {
|
||||
StringMatcher
|
||||
add(s string)
|
||||
addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
|
||||
setMatches() []string
|
||||
}
|
||||
|
||||
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
|
||||
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
|
||||
// If the estimated size is low enough, it's faster to use a slice instead of a map.
|
||||
if estimatedSize < minEqualMultiStringMatcherMapThreshold {
|
||||
if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
|
||||
return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
|
||||
}
|
||||
|
||||
return &equalMultiStringMapMatcher{
|
||||
values: make(map[string]struct{}, estimatedSize),
|
||||
prefixes: make(map[string][]StringMatcher, estimatedPrefixes),
|
||||
minPrefixLen: minPrefixLength,
|
||||
caseSensitive: caseSensitive,
|
||||
}
|
||||
}
@ -736,6 +761,10 @@ func (m *equalMultiStringSliceMatcher) add(s string) {
|
|||
m.values = append(m.values, s)
|
||||
}
|
||||
|
||||
func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (m *equalMultiStringSliceMatcher) setMatches() []string {
|
||||
return m.values
|
||||
}
@ -757,12 +786,17 @@ func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
|
||||
// equalMultiStringMapMatcher matches a string exactly against a map of valid values
|
||||
// or against a set of prefix matchers.
|
||||
type equalMultiStringMapMatcher struct {
|
||||
// values contains values to match a string against. If the matching is case insensitive,
|
||||
// the values here must be lowercase.
|
||||
values map[string]struct{}
|
||||
|
||||
// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string.
|
||||
// If the matching is case insensitive, prefixes are all lowercase.
|
||||
prefixes map[string][]StringMatcher
|
||||
// minPrefixLen can be zero, meaning there are no prefix matchers.
|
||||
minPrefixLen int
|
||||
caseSensitive bool
|
||||
}
@ -774,8 +808,27 @@ func (m *equalMultiStringMapMatcher) add(s string) {
|
|||
m.values[s] = struct{}{}
|
||||
}
|
||||
|
||||
func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
|
||||
if m.minPrefixLen == 0 {
|
||||
panic("addPrefix called when no prefix length defined")
|
||||
}
|
||||
if len(prefix) < m.minPrefixLen {
|
||||
panic("addPrefix called with a too short prefix")
|
||||
}
|
||||
if m.caseSensitive != prefixCaseSensitive {
|
||||
panic("addPrefix called with a prefix whose case sensitivity is different than the expected one")
|
||||
}
|
||||
|
||||
s := prefix[:m.minPrefixLen]
|
||||
if !m.caseSensitive {
|
||||
s = strings.ToLower(s)
|
||||
}
|
||||
|
||||
m.prefixes[s] = append(m.prefixes[s], matcher)
|
||||
}
|
||||
|
||||
func (m *equalMultiStringMapMatcher) setMatches() []string {
|
||||
if len(m.values) >= maxSetMatches {
|
||||
if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
|
||||
return nil
|
||||
}
@ -791,8 +844,17 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool {
|
|||
s = toNormalisedLower(s)
|
||||
}
|
||||
|
||||
_, ok := m.values[s]
|
||||
return ok
|
||||
if _, ok := m.values[s]; ok {
|
||||
return true
|
||||
}
|
||||
if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
|
||||
for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
|
||||
if matcher.Matches(s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
// toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert
@ -875,20 +937,24 @@ func (m trueMatcher) Matches(_ string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// optimizeEqualStringMatchers optimize a specific case where all matchers are made by an
|
||||
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In
|
||||
// this specific case, when we have many strings to match against we can use a map instead
|
||||
// optimizeEqualOrPrefixStringMatchers optimize a specific case where all matchers are made by an
|
||||
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
|
||||
// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
|
||||
//
|
||||
// In this specific case, when we have many strings to match against we can use a map instead
|
||||
// of iterating over the list of strings.
|
||||
func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
|
||||
func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
|
||||
var (
|
||||
caseSensitive bool
|
||||
caseSensitiveSet bool
|
||||
numValues int
|
||||
numPrefixes int
|
||||
minPrefixLength int
|
||||
)
|
||||
|
||||
// Analyse the input StringMatcher to count the number of occurrences
|
||||
// and ensure all of them have the same case sensitivity.
|
||||
analyseCallback := func(matcher *equalStringMatcher) bool {
|
||||
analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
|
||||
// Ensure we don't have mixed case sensitivity.
|
||||
if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
|
||||
return false
@ -901,34 +967,55 @@ func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatch
|
|||
return true
|
||||
}
|
||||
|
||||
if !findEqualStringMatchers(input, analyseCallback) {
|
||||
analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
|
||||
// Ensure we don't have mixed case sensitivity.
|
||||
if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
|
||||
return false
|
||||
} else if !caseSensitiveSet {
|
||||
caseSensitive = prefixCaseSensitive
|
||||
caseSensitiveSet = true
|
||||
}
|
||||
if numPrefixes == 0 || len(prefix) < minPrefixLength {
|
||||
minPrefixLength = len(prefix)
|
||||
}
|
||||
|
||||
numPrefixes++
|
||||
return true
|
||||
}
|
||||
|
||||
if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
|
||||
return input
|
||||
}
|
||||
|
||||
// If the number of values found is less than the threshold, then we should skip the optimization.
|
||||
if numValues < threshold {
|
||||
// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
|
||||
if (numValues + numPrefixes) < threshold {
|
||||
return input
|
||||
}
|
||||
|
||||
// Parse again the input StringMatcher to extract all values and storing them.
|
||||
// We can skip the case sensitivity check because we've already checked it and
|
||||
// if the code reach this point then it means all matchers have the same case sensitivity.
|
||||
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)
|
||||
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
|
||||
|
||||
// Ignore the return value because we already iterated over the input StringMatcher
|
||||
// and it was all good.
|
||||
findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
|
||||
findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
|
||||
multiMatcher.add(matcher.s)
|
||||
return true
|
||||
}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
|
||||
multiMatcher.addPrefix(prefix, caseSensitive, matcher)
|
||||
return true
|
||||
})
|
||||
|
||||
return multiMatcher
|
||||
}
|
||||
|
||||
// findEqualStringMatchers analyze the input StringMatcher and calls the callback for each
|
||||
// equalStringMatcher found. Returns true if and only if the input StringMatcher is *only*
|
||||
// composed by an alternation of equalStringMatcher.
|
||||
func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
|
||||
// findEqualOrPrefixStringMatchers analyze the input StringMatcher and calls the equalMatcherCallback for each
|
||||
// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
|
||||
//
|
||||
// Returns true if and only if the input StringMatcher is *only* composed by an alternation of equalStringMatcher and/or
|
||||
// literal prefix matcher. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
|
||||
func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
|
||||
orInput, ok := input.(orStringMatcher)
|
||||
if !ok {
|
||||
return false
@ -937,17 +1024,27 @@ func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalSt
|
|||
for _, m := range orInput {
|
||||
switch casted := m.(type) {
|
||||
case orStringMatcher:
|
||||
if !findEqualStringMatchers(m, callback) {
|
||||
if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
|
||||
return false
|
||||
}
|
||||
|
||||
case *equalStringMatcher:
|
||||
if !callback(casted) {
|
||||
if !equalMatcherCallback(casted) {
|
||||
return false
|
||||
}
|
||||
|
||||
case *literalPrefixSensitiveStringMatcher:
|
||||
if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
|
||||
return false
|
||||
}
|
||||
|
||||
case *literalPrefixInsensitiveStringMatcher:
|
||||
if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
|
||||
return false
|
||||
}
|
||||
|
||||
default:
|
||||
// It's not an equal string matcher, so we have to stop searching
|
||||
// It's not an equal or prefix string matcher, so we have to stop searching
|
||||
// cause this optimization can't be applied.
|
||||
return false
|
||||
}
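The hunks above replace the equality-only optimization with one that also accepts literal prefix matchers, bucketing them by their first minPrefixLen bytes so that a single map lookup narrows the candidates before any prefix check runs. A minimal, self-contained sketch of that idea (hypothetical names, plain standard library, not the Prometheus implementation):

package main

import (
	"fmt"
	"strings"
)

// prefixMatcher stands in for a literal-prefix matcher: it accepts any string
// starting with its literal prefix.
type prefixMatcher struct{ prefix string }

func (m prefixMatcher) Matches(s string) bool { return strings.HasPrefix(s, m.prefix) }

// multiMatcher mirrors the shape of the map-based matcher: exact values live
// in a set, prefix matchers are grouped by their first minPrefixLen bytes.
type multiMatcher struct {
	values       map[string]struct{}
	prefixes     map[string][]prefixMatcher
	minPrefixLen int
}

func (m multiMatcher) Matches(s string) bool {
	if _, ok := m.values[s]; ok {
		return true
	}
	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
		for _, pm := range m.prefixes[s[:m.minPrefixLen]] {
			if pm.Matches(s) {
				return true
			}
		}
	}
	return false
}

func main() {
	// Roughly what an alternation like "(foo|bar.*|baz.*)" can be reduced to:
	// one exact value plus two prefix matchers bucketed by their first 3 bytes.
	m := multiMatcher{
		values:       map[string]struct{}{"foo": {}},
		prefixes:     map[string][]prefixMatcher{"bar": {{prefix: "bar"}}, "baz": {{prefix: "baz"}}},
		minPrefixLen: 3,
	}
	fmt.Println(m.Matches("foo"), m.Matches("barista"), m.Matches("qux")) // true true false
}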
@ -71,6 +71,8 @@ var (
|
|||
// A long case insensitive alternation.
|
||||
"(?i:(zQPbMkNO|NNSPdvMi|iWuuSoAl|qbvKMimS|IecrXtPa|seTckYqt|NxnyHkgB|fIDlOgKb|UhlWIygH|OtNoJxHG|cUTkFVIV|mTgFIHjr|jQkoIDtE|PPMKxRXl|AwMfwVkQ|CQyMrTQJ|BzrqxVSi|nTpcWuhF|PertdywG|ZZDgCtXN|WWdDPyyE|uVtNQsKk|BdeCHvPZ|wshRnFlH|aOUIitIp|RxZeCdXT|CFZMslCj|AVBZRDxl|IzIGCnhw|ythYuWiz|oztXVXhl|VbLkwqQx|qvaUgyVC|VawUjPWC|ecloYJuj|boCLTdSU|uPrKeAZx|hrMWLWBq|JOnUNHRM|rYnujkPq|dDEdZhIj|DRrfvugG|yEGfDxVV|YMYdJWuP|PHUQZNWM|AmKNrLis|zTxndVfn|FPsHoJnc|EIulZTua|KlAPhdzg|ScHJJCLt|NtTfMzME|eMCwuFdo|SEpJVJbR|cdhXZeCx|sAVtBwRh|kVFEVcMI|jzJrxraA|tGLHTell|NNWoeSaw|DcOKSetX|UXZAJyka|THpMphDP|rizheevl|kDCBRidd|pCZZRqyu|pSygkitl|SwZGkAaW|wILOrfNX|QkwVOerj|kHOMxPDr|EwOVycJv|AJvtzQFS|yEOjKYYB|LizIINLL|JBRSsfcG|YPiUqqNl|IsdEbvee|MjEpGcBm|OxXZVgEQ|xClXGuxa|UzRCGFEb|buJbvfvA|IPZQxRet|oFYShsMc|oBHffuHO|bzzKrcBR|KAjzrGCl|IPUsAVls|OGMUMbIU|gyDccHuR|bjlalnDd|ZLWjeMna|fdsuIlxQ|dVXtiomV|XxedTjNg|XWMHlNoA|nnyqArQX|opfkWGhb|wYtnhdYb))",
|
||||
"(?i:(AAAAAAAAAAAAAAAAAAAAAAAA|BBBBBBBBBBBBBBBBBBBBBBBB|cccccccccccccccccccccccC|ſſſſſſſſſſſſſſſſſſſſſſſſS|SSSSSSSSSSSSSSSSSSSSSSSSſ))",
|
||||
// A short case insensitive alternation where each entry ends with ".*".
|
||||
"(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*))",
|
||||
// A long case insensitive alternation where each entry ends with ".*".
|
||||
"(?i:(zQPbMkNO.*|NNSPdvMi.*|iWuuSoAl.*|qbvKMimS.*|IecrXtPa.*|seTckYqt.*|NxnyHkgB.*|fIDlOgKb.*|UhlWIygH.*|OtNoJxHG.*|cUTkFVIV.*|mTgFIHjr.*|jQkoIDtE.*|PPMKxRXl.*|AwMfwVkQ.*|CQyMrTQJ.*|BzrqxVSi.*|nTpcWuhF.*|PertdywG.*|ZZDgCtXN.*|WWdDPyyE.*|uVtNQsKk.*|BdeCHvPZ.*|wshRnFlH.*|aOUIitIp.*|RxZeCdXT.*|CFZMslCj.*|AVBZRDxl.*|IzIGCnhw.*|ythYuWiz.*|oztXVXhl.*|VbLkwqQx.*|qvaUgyVC.*|VawUjPWC.*|ecloYJuj.*|boCLTdSU.*|uPrKeAZx.*|hrMWLWBq.*|JOnUNHRM.*|rYnujkPq.*|dDEdZhIj.*|DRrfvugG.*|yEGfDxVV.*|YMYdJWuP.*|PHUQZNWM.*|AmKNrLis.*|zTxndVfn.*|FPsHoJnc.*|EIulZTua.*|KlAPhdzg.*|ScHJJCLt.*|NtTfMzME.*|eMCwuFdo.*|SEpJVJbR.*|cdhXZeCx.*|sAVtBwRh.*|kVFEVcMI.*|jzJrxraA.*|tGLHTell.*|NNWoeSaw.*|DcOKSetX.*|UXZAJyka.*|THpMphDP.*|rizheevl.*|kDCBRidd.*|pCZZRqyu.*|pSygkitl.*|SwZGkAaW.*|wILOrfNX.*|QkwVOerj.*|kHOMxPDr.*|EwOVycJv.*|AJvtzQFS.*|yEOjKYYB.*|LizIINLL.*|JBRSsfcG.*|YPiUqqNl.*|IsdEbvee.*|MjEpGcBm.*|OxXZVgEQ.*|xClXGuxa.*|UzRCGFEb.*|buJbvfvA.*|IPZQxRet.*|oFYShsMc.*|oBHffuHO.*|bzzKrcBR.*|KAjzrGCl.*|IPUsAVls.*|OGMUMbIU.*|gyDccHuR.*|bjlalnDd.*|ZLWjeMna.*|fdsuIlxQ.*|dVXtiomV.*|XxedTjNg.*|XWMHlNoA.*|nnyqArQX.*|opfkWGhb.*|wYtnhdYb.*))",
|
||||
// A long case insensitive alternation where each entry starts with ".*".
@ -376,7 +378,7 @@ func TestStringMatcherFromRegexp(t *testing.T) {
|
|||
{"10\\.0\\.(1|2)\\.+", nil},
|
||||
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
|
||||
{"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}},
|
||||
{"foo-.*$", &literalPrefixStringMatcher{prefix: "foo-", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}},
|
||||
{"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}},
|
||||
{"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
|
||||
{"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})},
|
||||
{"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})},
@ -391,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
|
|||
{".*foo.*bar.*", nil},
|
||||
{`\d*`, nil},
|
||||
{".", nil},
|
||||
{"/|/bar.*", &literalPrefixStringMatcher{prefix: "/", prefixCaseSensitive: true, right: orStringMatcher{emptyStringMatcher{}, &literalPrefixStringMatcher{prefix: "bar", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
// This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat.
|
||||
// It would make the code too complex to handle it.
|
||||
{"(.+)/(foo.*|bar$)", nil},
|
||||
// Case sensitive alternate with same literal prefix and .* suffix.
|
||||
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "xyz-016a-ixb-", prefixCaseSensitive: true, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "dp", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "op", prefixCaseSensitive: true, right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
// Case insensitive alternate with same literal prefix and .* suffix.
|
||||
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixStringMatcher{prefix: "XYZ-016A-IXB-", prefixCaseSensitive: false, right: orStringMatcher{&literalPrefixStringMatcher{prefix: "DP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}, &literalPrefixStringMatcher{prefix: "OP", prefixCaseSensitive: false, right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
|
||||
// Concatenated variable length selectors are not supported.
|
||||
{"foo.*.*", nil},
|
||||
{"foo.+.+", nil},
@ -408,9 +410,9 @@ func TestStringMatcherFromRegexp(t *testing.T) {
|
|||
{"aaa.?.?", nil},
|
||||
{"aaa.?.*", nil},
|
||||
// Regexps with ".?".
|
||||
{"ext.?|xfs", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
|
||||
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixStringMatcher{prefix: "ext", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
|
||||
{"foo.?", &literalPrefixStringMatcher{prefix: "foo", prefixCaseSensitive: true, right: &zeroOrOneCharacterStringMatcher{matchNL: false}}},
|
||||
{"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
|
||||
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
|
||||
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}},
|
||||
{"f.?o", nil},
|
||||
} {
|
||||
c := c
@ -480,10 +482,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
|
|||
|
||||
re := regexp.MustCompile("^" + c.pattern + "$")
|
||||
|
||||
// Pre-condition check: ensure it contains literalPrefixStringMatcher.
|
||||
// Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher.
|
||||
numPrefixMatchers := 0
|
||||
visitStringMatcher(matcher, func(matcher StringMatcher) {
|
||||
if _, ok := matcher.(*literalPrefixStringMatcher); ok {
|
||||
if _, ok := matcher.(*literalPrefixSensitiveStringMatcher); ok {
|
||||
numPrefixMatchers++
|
||||
}
|
||||
if _, ok := matcher.(*literalPrefixInsensitiveStringMatcher); ok {
|
||||
numPrefixMatchers++
|
||||
}
|
||||
})
@ -683,7 +688,15 @@ func randStrings(randGenerator *rand.Rand, many, length int) []string {
|
|||
return out
|
||||
}
|
||||
|
||||
func TestOptimizeEqualStringMatchers(t *testing.T) {
|
||||
func randStringsWithSuffix(randGenerator *rand.Rand, many, length int, suffix string) []string {
|
||||
out := randStrings(randGenerator, many, length)
|
||||
for i := range out {
|
||||
out[i] += suffix
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestOptimizeEqualOrPrefixStringMatchers(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input StringMatcher
|
||||
expectedValues []string
@ -764,7 +777,7 @@ func TestOptimizeEqualStringMatchers(t *testing.T) {
|
|||
|
||||
for testName, testData := range tests {
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
actualMatcher := optimizeEqualStringMatchers(testData.input, 0)
|
||||
actualMatcher := optimizeEqualOrPrefixStringMatchers(testData.input, 0)
|
||||
|
||||
if testData.expectedValues == nil {
|
||||
require.IsType(t, testData.input, actualMatcher)
@ -779,10 +792,12 @@ func TestOptimizeEqualStringMatchers(t *testing.T) {
|
|||
|
||||
func TestNewEqualMultiStringMatcher(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
values []string
|
||||
caseSensitive bool
|
||||
expectedValuesMap map[string]struct{}
|
||||
expectedValuesList []string
|
||||
values []string
|
||||
caseSensitivePrefixes []*literalPrefixSensitiveStringMatcher
|
||||
caseSensitive bool
|
||||
expectedValuesMap map[string]struct{}
|
||||
expectedPrefixesMap map[string][]StringMatcher
|
||||
expectedValuesList []string
|
||||
}{
|
||||
"few case sensitive values": {
|
||||
values: []string{"a", "B"},
@ -794,27 +809,47 @@ func TestNewEqualMultiStringMatcher(t *testing.T) {
|
|||
caseSensitive: false,
|
||||
expectedValuesList: []string{"a", "B"},
|
||||
},
|
||||
"few case sensitive values and prefixes": {
|
||||
values: []string{"a"},
|
||||
caseSensitivePrefixes: []*literalPrefixSensitiveStringMatcher{{prefix: "B", right: anyStringWithoutNewlineMatcher{}}},
|
||||
caseSensitive: true,
|
||||
expectedValuesMap: map[string]struct{}{"a": {}},
|
||||
expectedPrefixesMap: map[string][]StringMatcher{"B": {&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}}},
|
||||
},
|
||||
"many case sensitive values": {
|
||||
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
|
||||
caseSensitive: true,
|
||||
expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}},
|
||||
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
|
||||
caseSensitive: true,
|
||||
expectedValuesMap: map[string]struct{}{"a": {}, "B": {}, "c": {}, "D": {}, "e": {}, "F": {}, "g": {}, "H": {}, "i": {}, "L": {}, "m": {}, "N": {}, "o": {}, "P": {}, "q": {}, "r": {}},
|
||||
expectedPrefixesMap: map[string][]StringMatcher{},
|
||||
},
|
||||
"many case insensitive values": {
|
||||
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
|
||||
caseSensitive: false,
|
||||
expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}},
|
||||
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
|
||||
caseSensitive: false,
|
||||
expectedValuesMap: map[string]struct{}{"a": {}, "b": {}, "c": {}, "d": {}, "e": {}, "f": {}, "g": {}, "h": {}, "i": {}, "l": {}, "m": {}, "n": {}, "o": {}, "p": {}, "q": {}, "r": {}},
|
||||
expectedPrefixesMap: map[string][]StringMatcher{},
|
||||
},
|
||||
}
|
||||
|
||||
for testName, testData := range tests {
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values))
|
||||
// To keep this test simple, we always assume a min prefix length of 1.
|
||||
minPrefixLength := 0
|
||||
if len(testData.caseSensitivePrefixes) > 0 {
|
||||
minPrefixLength = 1
|
||||
}
|
||||
|
||||
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.caseSensitivePrefixes), minPrefixLength)
|
||||
for _, v := range testData.values {
|
||||
matcher.add(v)
|
||||
}
|
||||
if testData.expectedValuesMap != nil {
|
||||
for _, p := range testData.caseSensitivePrefixes {
|
||||
matcher.addPrefix(p.prefix, true, p)
|
||||
}
|
||||
|
||||
if testData.expectedValuesMap != nil || testData.expectedPrefixesMap != nil {
|
||||
require.IsType(t, &equalMultiStringMapMatcher{}, matcher)
|
||||
require.Equal(t, testData.expectedValuesMap, matcher.(*equalMultiStringMapMatcher).values)
|
||||
require.Equal(t, testData.expectedPrefixesMap, matcher.(*equalMultiStringMapMatcher).prefixes)
|
||||
require.Equal(t, testData.caseSensitive, matcher.(*equalMultiStringMapMatcher).caseSensitive)
|
||||
}
|
||||
if testData.expectedValuesList != nil {
@ -826,9 +861,32 @@ func TestNewEqualMultiStringMatcher(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestEqualMultiStringMapMatcher_addPrefix(t *testing.T) {
|
||||
t.Run("should panic if the matcher is case sensitive but the prefix is not case sensitive", func(t *testing.T) {
|
||||
matcher := newEqualMultiStringMatcher(true, 0, 1, 1)
|
||||
|
||||
require.Panics(t, func() {
|
||||
matcher.addPrefix("a", false, &literalPrefixInsensitiveStringMatcher{
|
||||
prefix: "a",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("should panic if the matcher is not case sensitive but the prefix is case sensitive", func(t *testing.T) {
|
||||
matcher := newEqualMultiStringMatcher(false, 0, 1, 1)
|
||||
|
||||
require.Panics(t, func() {
|
||||
matcher.addPrefix("a", true, &literalPrefixSensitiveStringMatcher{
|
||||
prefix: "a",
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestEqualMultiStringMatcher_Matches(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
values []string
|
||||
prefixes []StringMatcher
|
||||
caseSensitive bool
|
||||
expectedMatches []string
|
||||
expectedNotMatches []string
@ -845,6 +903,24 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) {
|
|||
expectedMatches: []string{"a", "A", "b", "B"},
|
||||
expectedNotMatches: []string{"c", "C"},
|
||||
},
|
||||
"few case sensitive prefixes": {
|
||||
prefixes: []StringMatcher{
|
||||
&literalPrefixSensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}},
|
||||
&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}},
|
||||
},
|
||||
caseSensitive: true,
|
||||
expectedMatches: []string{"a", "aX", "B", "BX"},
|
||||
expectedNotMatches: []string{"A", "b"},
|
||||
},
|
||||
"few case insensitive prefixes": {
|
||||
prefixes: []StringMatcher{
|
||||
&literalPrefixInsensitiveStringMatcher{prefix: "a", right: anyStringWithoutNewlineMatcher{}},
|
||||
&literalPrefixInsensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}},
|
||||
},
|
||||
caseSensitive: false,
|
||||
expectedMatches: []string{"a", "aX", "A", "AX", "b", "bX", "B", "BX"},
|
||||
expectedNotMatches: []string{"c", "cX", "C", "CX"},
|
||||
},
|
||||
"many case sensitive values": {
|
||||
values: []string{"a", "B", "c", "D", "e", "F", "g", "H", "i", "L", "m", "N", "o", "P", "q", "r"},
|
||||
caseSensitive: true,
@ -857,14 +933,37 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) {
|
|||
expectedMatches: []string{"a", "A", "b", "B"},
|
||||
expectedNotMatches: []string{"x", "X"},
|
||||
},
|
||||
"mixed values and prefixes": {
|
||||
values: []string{"a"},
|
||||
prefixes: []StringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "B", right: anyStringWithoutNewlineMatcher{}}},
|
||||
caseSensitive: true,
|
||||
expectedMatches: []string{"a", "B", "BX"},
|
||||
expectedNotMatches: []string{"aX", "A", "b", "bX"},
|
||||
},
|
||||
}
|
||||
|
||||
for testName, testData := range tests {
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values))
|
||||
// To keep this test simple, we always assume a min prefix length of 1.
|
||||
minPrefixLength := 0
|
||||
if len(testData.prefixes) > 0 {
|
||||
minPrefixLength = 1
|
||||
}
|
||||
|
||||
matcher := newEqualMultiStringMatcher(testData.caseSensitive, len(testData.values), len(testData.prefixes), minPrefixLength)
|
||||
for _, v := range testData.values {
|
||||
matcher.add(v)
|
||||
}
|
||||
for _, p := range testData.prefixes {
|
||||
switch m := p.(type) {
|
||||
case *literalPrefixSensitiveStringMatcher:
|
||||
matcher.addPrefix(m.prefix, true, p)
|
||||
case *literalPrefixInsensitiveStringMatcher:
|
||||
matcher.addPrefix(m.prefix, false, p)
|
||||
default:
|
||||
panic("Unexpected type in test case")
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range testData.expectedMatches {
|
||||
require.True(t, matcher.Matches(v), "value: %s", v)
@ -876,29 +975,33 @@ func TestEqualMultiStringMatcher_Matches(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFindEqualStringMatchers(t *testing.T) {
|
||||
func TestFindEqualOrPrefixStringMatchers(t *testing.T) {
|
||||
type match struct {
|
||||
s string
|
||||
caseSensitive bool
|
||||
}
|
||||
|
||||
// Utility to call findEqualStringMatchers() and collect all callback invocations.
|
||||
findEqualStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) {
|
||||
ok = findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
|
||||
// Utility to call findEqualOrPrefixStringMatchers() and collect all callback invocations.
|
||||
findEqualOrPrefixStringMatchersAndCollectMatches := func(input StringMatcher) (matches []match, ok bool) {
|
||||
ok = findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
|
||||
matches = append(matches, match{matcher.s, matcher.caseSensitive})
|
||||
return true
|
||||
}, func(prefix string, prefixCaseSensitive bool, right StringMatcher) bool {
|
||||
matches = append(matches, match{prefix, prefixCaseSensitive})
|
||||
return true
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Run("empty matcher", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(emptyStringMatcher{})
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(emptyStringMatcher{})
|
||||
require.False(t, actualOk)
|
||||
require.Empty(t, actualMatches)
|
||||
})
|
||||
|
||||
t.Run("concat of literal matchers (case sensitive)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&equalStringMatcher{s: "test-1", caseSensitive: true},
|
||||
&equalStringMatcher{s: "test-2", caseSensitive: true},
@ -910,7 +1013,7 @@ func TestFindEqualStringMatchers(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("concat of literal matchers (case insensitive)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&equalStringMatcher{s: "test-1", caseSensitive: false},
|
||||
&equalStringMatcher{s: "test-2", caseSensitive: false},
@ -922,7 +1025,7 @@ func TestFindEqualStringMatchers(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("concat of literal matchers (mixed case)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualStringMatchersAndCollectMatches(
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&equalStringMatcher{s: "test-1", caseSensitive: false},
|
||||
&equalStringMatcher{s: "test-2", caseSensitive: true},
@ -932,11 +1035,59 @@ func TestFindEqualStringMatchers(t *testing.T) {
|
|||
require.True(t, actualOk)
|
||||
require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
|
||||
})
|
||||
|
||||
t.Run("concat of literal prefix matchers (case sensitive)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&literalPrefixSensitiveStringMatcher{prefix: "test-1"},
|
||||
&literalPrefixSensitiveStringMatcher{prefix: "test-2"},
|
||||
},
|
||||
)
|
||||
|
||||
require.True(t, actualOk)
|
||||
require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
|
||||
})
|
||||
|
||||
t.Run("concat of literal prefix matchers (case insensitive)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&literalPrefixInsensitiveStringMatcher{prefix: "test-1"},
|
||||
&literalPrefixInsensitiveStringMatcher{prefix: "test-2"},
|
||||
},
|
||||
)
|
||||
|
||||
require.True(t, actualOk)
|
||||
require.Equal(t, []match{{"test-1", false}, {"test-2", false}}, actualMatches)
|
||||
})
|
||||
|
||||
t.Run("concat of literal prefix matchers (mixed case)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&literalPrefixInsensitiveStringMatcher{prefix: "test-1"},
|
||||
&literalPrefixSensitiveStringMatcher{prefix: "test-2"},
|
||||
},
|
||||
)
|
||||
|
||||
require.True(t, actualOk)
|
||||
require.Equal(t, []match{{"test-1", false}, {"test-2", true}}, actualMatches)
|
||||
})
|
||||
|
||||
t.Run("concat of literal string and prefix matchers (case sensitive)", func(t *testing.T) {
|
||||
actualMatches, actualOk := findEqualOrPrefixStringMatchersAndCollectMatches(
|
||||
orStringMatcher{
|
||||
&equalStringMatcher{s: "test-1", caseSensitive: true},
|
||||
&literalPrefixSensitiveStringMatcher{prefix: "test-2"},
|
||||
},
|
||||
)
|
||||
|
||||
require.True(t, actualOk)
|
||||
require.Equal(t, []match{{"test-1", true}, {"test-2", true}}, actualMatches)
|
||||
})
|
||||
}
|
||||
|
||||
// This benchmark is used to find a good threshold to use to apply the optimization
|
||||
// done by optimizeEqualStringMatchers().
|
||||
func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
|
||||
// done by optimizeEqualOrPrefixStringMatchers().
|
||||
func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) {
|
||||
randGenerator := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
// Generate variable lengths random texts to match against.
@ -946,42 +1097,51 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
|
|||
|
||||
for numAlternations := 2; numAlternations <= 256; numAlternations *= 2 {
|
||||
for _, caseSensitive := range []bool{true, false} {
|
||||
b.Run(fmt.Sprintf("alternations: %d case sensitive: %t", numAlternations, caseSensitive), func(b *testing.B) {
|
||||
// Generate a regex with the expected number of alternations.
|
||||
re := strings.Join(randStrings(randGenerator, numAlternations, 10), "|")
|
||||
if !caseSensitive {
|
||||
re = "(?i:(" + re + "))"
|
||||
}
|
||||
|
||||
parsed, err := syntax.Parse(re, syntax.Perl)
|
||||
require.NoError(b, err)
|
||||
|
||||
unoptimized := stringMatcherFromRegexpInternal(parsed)
|
||||
require.IsType(b, orStringMatcher{}, unoptimized)
|
||||
|
||||
optimized := optimizeEqualStringMatchers(unoptimized, 0)
|
||||
if numAlternations < minEqualMultiStringMatcherMapThreshold {
|
||||
require.IsType(b, &equalMultiStringSliceMatcher{}, optimized)
|
||||
} else {
|
||||
require.IsType(b, &equalMultiStringMapMatcher{}, optimized)
|
||||
}
|
||||
|
||||
b.Run("without optimizeEqualStringMatchers()", func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
for _, t := range texts {
|
||||
unoptimized.Matches(t)
|
||||
}
|
||||
for _, prefixMatcher := range []bool{true, false} {
|
||||
b.Run(fmt.Sprintf("alternations: %d case sensitive: %t prefix matcher: %t", numAlternations, caseSensitive, prefixMatcher), func(b *testing.B) {
|
||||
// If the test should run on prefix matchers, we add a wildcard matcher as suffix (prefix will be a literal).
|
||||
suffix := ""
|
||||
if prefixMatcher {
|
||||
suffix = ".*"
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("with optimizeEqualStringMatchers()", func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
for _, t := range texts {
|
||||
optimized.Matches(t)
|
||||
}
|
||||
// Generate a regex with the expected number of alternations.
|
||||
re := strings.Join(randStringsWithSuffix(randGenerator, numAlternations, 10, suffix), "|")
|
||||
if !caseSensitive {
|
||||
re = "(?i:(" + re + "))"
|
||||
}
|
||||
b.Logf("regexp: %s", re)
|
||||
|
||||
parsed, err := syntax.Parse(re, syntax.Perl)
|
||||
require.NoError(b, err)
|
||||
|
||||
unoptimized := stringMatcherFromRegexpInternal(parsed)
|
||||
require.IsType(b, orStringMatcher{}, unoptimized)
|
||||
|
||||
optimized := optimizeEqualOrPrefixStringMatchers(unoptimized, 0)
|
||||
if numAlternations < minEqualMultiStringMatcherMapThreshold && !prefixMatcher {
|
||||
require.IsType(b, &equalMultiStringSliceMatcher{}, optimized)
|
||||
} else {
|
||||
require.IsType(b, &equalMultiStringMapMatcher{}, optimized)
|
||||
}
|
||||
|
||||
b.Run("without optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
for _, t := range texts {
|
||||
unoptimized.Matches(t)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("with optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
for _, t := range texts {
|
||||
optimized.Matches(t)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
@ -1074,20 +1234,14 @@ func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestLiteralPrefixStringMatcher(t *testing.T) {
|
||||
m := &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &emptyStringMatcher{}}
|
||||
func TestLiteralPrefixSensitiveStringMatcher(t *testing.T) {
|
||||
m := &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}}
|
||||
require.True(t, m.Matches("mar"))
|
||||
require.False(t, m.Matches("marco"))
|
||||
require.False(t, m.Matches("ma"))
|
||||
require.False(t, m.Matches("mAr"))
|
||||
|
||||
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: false, right: &emptyStringMatcher{}}
|
||||
require.True(t, m.Matches("mar"))
|
||||
require.False(t, m.Matches("marco"))
|
||||
require.False(t, m.Matches("ma"))
|
||||
require.True(t, m.Matches("mAr"))
|
||||
|
||||
m = &literalPrefixStringMatcher{prefix: "mar", prefixCaseSensitive: true, right: &equalStringMatcher{s: "co", caseSensitive: false}}
|
||||
m = &literalPrefixSensitiveStringMatcher{prefix: "mar", right: &equalStringMatcher{s: "co", caseSensitive: false}}
|
||||
require.True(t, m.Matches("marco"))
|
||||
require.True(t, m.Matches("marCO"))
|
||||
require.False(t, m.Matches("MARco"))
@ -1095,6 +1249,14 @@ func TestLiteralPrefixStringMatcher(t *testing.T) {
|
|||
require.False(t, m.Matches("marcopracucci"))
|
||||
}
|
||||
|
||||
func TestLiteralPrefixInsensitiveStringMatcher(t *testing.T) {
|
||||
m := &literalPrefixInsensitiveStringMatcher{prefix: "mar", right: &emptyStringMatcher{}}
|
||||
require.True(t, m.Matches("mar"))
|
||||
require.False(t, m.Matches("marco"))
|
||||
require.False(t, m.Matches("ma"))
|
||||
require.True(t, m.Matches("mAr"))
|
||||
}
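// Illustration only, not part of this test file: the case-insensitive paths in
// regexp.go cannot rely on plain lowercasing, because Unicode case folding is
// wider than ASCII lowering; that is why the map-based matcher normalises its
// input (see toNormalisedLower above) and why the long case-insensitive test
// alternations include the long-s rune "ſ":
//
//	strings.EqualFold("ſ", "S")  // true: U+017F case-folds to "s"
//	strings.ToLower("ſ") == "s"  // false: ToLower leaves U+017F unchanged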
|
||||
|
||||
func TestLiteralSuffixStringMatcher(t *testing.T) {
|
||||
m := &literalSuffixStringMatcher{left: &emptyStringMatcher{}, suffix: "co", suffixCaseSensitive: true}
|
||||
require.True(t, m.Matches("co"))
@ -1184,7 +1346,10 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch
|
|||
visitStringMatcher(casted.right, callback)
|
||||
}
|
||||
|
||||
case *literalPrefixStringMatcher:
|
||||
case *literalPrefixSensitiveStringMatcher:
|
||||
visitStringMatcher(casted.right, callback)
|
||||
|
||||
case *literalPrefixInsensitiveStringMatcher:
|
||||
visitStringMatcher(casted.right, callback)
|
||||
|
||||
case *literalSuffixStringMatcher:
@ -1196,10 +1361,16 @@ func visitStringMatcher(matcher StringMatcher, callback func(matcher StringMatch
|
|||
}
|
||||
|
||||
// No nested matchers for the following ones.
|
||||
case *equalMultiStringMapMatcher:
|
||||
for _, prefixes := range casted.prefixes {
|
||||
for _, matcher := range prefixes {
|
||||
visitStringMatcher(matcher, callback)
|
||||
}
|
||||
}
|
||||
|
||||
case emptyStringMatcher:
|
||||
case *equalStringMatcher:
|
||||
case *equalMultiStringSliceMatcher:
|
||||
case *equalMultiStringMapMatcher:
|
||||
case anyStringWithoutNewlineMatcher:
|
||||
case *anyNonEmptyStringMatcher:
|
||||
case trueMatcher:
|
||||
|
|
201
prompb/codec.go
Normal file
|
@@ -0,0 +1,201 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prompb
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
|
||||
|
||||
// ToLabels returns model labels.Labels from the timeseries' remote labels.
|
||||
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
|
||||
return labelProtosToLabels(b, m.GetLabels())
|
||||
}
|
||||
|
||||
// ToLabels returns model labels.Labels from the timeseries' remote labels.
|
||||
func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
|
||||
return labelProtosToLabels(b, m.GetLabels())
|
||||
}
|
||||
|
||||
func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
|
||||
b.Reset()
|
||||
for _, l := range labelPairs {
|
||||
b.Add(l.Name, l.Value)
|
||||
}
|
||||
b.Sort()
|
||||
return b.Labels()
|
||||
}
|
||||
|
||||
// FromLabels transforms labels into prompb labels. The buffer slice
|
||||
// will be used to avoid allocations if it is big enough to store the labels.
|
||||
func FromLabels(lbls labels.Labels, buf []Label) []Label {
|
||||
result := buf[:0]
|
||||
lbls.Range(func(l labels.Label) {
|
||||
result = append(result, Label{
|
||||
Name: l.Name,
|
||||
Value: l.Value,
|
||||
})
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
|
||||
func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
|
||||
mt := strings.ToUpper(string(t))
|
||||
v, ok := MetricMetadata_MetricType_value[mt]
|
||||
if !ok {
|
||||
return MetricMetadata_UNKNOWN
|
||||
}
|
||||
return MetricMetadata_MetricType(v)
|
||||
}
|
||||
|
||||
// IsFloatHistogram returns true if the histogram is float.
|
||||
func (h Histogram) IsFloatHistogram() bool {
|
||||
_, ok := h.GetCount().(*Histogram_CountFloat)
|
||||
return ok
|
||||
}
|
||||
|
||||
// ToIntHistogram returns integer Prometheus histogram from the remote implementation
|
||||
// of integer histogram. If it's a float histogram, the method returns nil.
|
||||
func (h Histogram) ToIntHistogram() *histogram.Histogram {
|
||||
if h.IsFloatHistogram() {
|
||||
return nil
|
||||
}
|
||||
return &histogram.Histogram{
|
||||
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: h.GetZeroCountInt(),
|
||||
Count: h.GetCountInt(),
|
||||
Sum: h.Sum,
|
||||
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
|
||||
PositiveBuckets: h.GetPositiveDeltas(),
|
||||
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
|
||||
NegativeBuckets: h.GetNegativeDeltas(),
|
||||
}
|
||||
}
|
||||
|
||||
// ToFloatHistogram returns float Prometheus histogram from the remote implementation
|
||||
// of float histogram. If the underlying implementation is an integer histogram, a
|
||||
// conversion is performed.
|
||||
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
|
||||
if h.IsFloatHistogram() {
|
||||
return &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: h.GetZeroCountFloat(),
|
||||
Count: h.GetCountFloat(),
|
||||
Sum: h.Sum,
|
||||
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
|
||||
PositiveBuckets: h.GetPositiveCounts(),
|
||||
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
|
||||
NegativeBuckets: h.GetNegativeCounts(),
|
||||
}
|
||||
}
|
||||
// Conversion from integer histogram.
|
||||
return &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: float64(h.GetZeroCountInt()),
|
||||
Count: float64(h.GetCountInt()),
|
||||
Sum: h.Sum,
|
||||
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
|
||||
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
|
||||
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
|
||||
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
|
||||
}
|
||||
}
|
||||
|
||||
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
|
||||
spans := make([]histogram.Span, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
||||
}
|
||||
|
||||
return spans
|
||||
}
|
||||
|
||||
func deltasToCounts(deltas []int64) []float64 {
|
||||
counts := make([]float64, len(deltas))
|
||||
var cur float64
|
||||
for i, d := range deltas {
|
||||
cur += float64(d)
|
||||
counts[i] = cur
|
||||
}
|
||||
return counts
|
||||
}
|
||||
|
||||
// FromIntHistogram returns remote Histogram from the integer Histogram.
|
||||
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
|
||||
return Histogram{
|
||||
Count: &Histogram_CountInt{CountInt: h.Count},
|
||||
Sum: h.Sum,
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
|
||||
NegativeSpans: spansToSpansProto(h.NegativeSpans),
|
||||
NegativeDeltas: h.NegativeBuckets,
|
||||
PositiveSpans: spansToSpansProto(h.PositiveSpans),
|
||||
PositiveDeltas: h.PositiveBuckets,
|
||||
ResetHint: Histogram_ResetHint(h.CounterResetHint),
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
// FromFloatHistogram returns remote Histogram from the float Histogram.
|
||||
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
|
||||
return Histogram{
|
||||
Count: &Histogram_CountFloat{CountFloat: fh.Count},
|
||||
Sum: fh.Sum,
|
||||
Schema: fh.Schema,
|
||||
ZeroThreshold: fh.ZeroThreshold,
|
||||
ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
|
||||
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
|
||||
NegativeCounts: fh.NegativeBuckets,
|
||||
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
|
||||
PositiveCounts: fh.PositiveBuckets,
|
||||
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func spansToSpansProto(s []histogram.Span) []BucketSpan {
|
||||
spans := make([]BucketSpan, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
||||
}
|
||||
|
||||
return spans
|
||||
}
|
||||
|
||||
// ToExemplar converts remote exemplar to model exemplar.
|
||||
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar {
|
||||
timestamp := m.Timestamp
|
||||
|
||||
return exemplar.Exemplar{
|
||||
Labels: labelProtosToLabels(b, m.GetLabels()),
|
||||
Value: m.Value,
|
||||
Ts: timestamp,
|
||||
HasTs: timestamp != 0,
|
||||
}
|
||||
}
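Taken together, these helpers give a symmetric conversion between the prompb wire
types and the Prometheus model types. A minimal standalone sketch of the round trip
(the label values are made up for illustration and are not part of the change):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/prompb"
)

func main() {
	lbls := labels.FromStrings("__name__", "http_requests_total", "job", "api")

	// Model labels -> wire labels; no reusable buffer is passed here (nil).
	wire := prompb.FromLabels(lbls, nil)
	ts := prompb.TimeSeries{Labels: wire}

	// Wire labels -> model labels; the v1 codec ignores the symbols argument.
	b := labels.NewScratchBuilder(2)
	roundTripped := ts.ToLabels(&b, nil)

	fmt.Println(labels.Equal(lbls, roundTripped)) // true
}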
|
|
@@ -17,14 +17,6 @@ import (
|
|||
"sync"
|
||||
)
|
||||
|
||||
func (m Sample) T() int64 { return m.Timestamp }
|
||||
func (m Sample) V() float64 { return m.Value }
|
||||
|
||||
func (h Histogram) IsFloatHistogram() bool {
|
||||
_, ok := h.GetCount().(*Histogram_CountFloat)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) {
|
||||
size := r.Size()
|
||||
data, ok := p.Get().(*[]byte)
|
||||
|
|
216
prompb/io/prometheus/write/v2/codec.go
Normal file
|
@@ -0,0 +1,216 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package writev2
|
||||
|
||||
import (
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
)
|
||||
|
||||
// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
|
||||
|
||||
// ToLabels returns model labels.Labels from the timeseries' remote labels.
|
||||
func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
|
||||
return desymbolizeLabels(b, m.GetLabelsRefs(), symbols)
|
||||
}
|
||||
|
||||
// ToMetadata returns model metadata from the timeseries' remote metadata.
|
||||
func (m TimeSeries) ToMetadata(symbols []string) metadata.Metadata {
|
||||
typ := model.MetricTypeUnknown
|
||||
switch m.Metadata.Type {
|
||||
case Metadata_METRIC_TYPE_COUNTER:
|
||||
typ = model.MetricTypeCounter
|
||||
case Metadata_METRIC_TYPE_GAUGE:
|
||||
typ = model.MetricTypeGauge
|
||||
case Metadata_METRIC_TYPE_HISTOGRAM:
|
||||
typ = model.MetricTypeHistogram
|
||||
case Metadata_METRIC_TYPE_GAUGEHISTOGRAM:
|
||||
typ = model.MetricTypeGaugeHistogram
|
||||
case Metadata_METRIC_TYPE_SUMMARY:
|
||||
typ = model.MetricTypeSummary
|
||||
case Metadata_METRIC_TYPE_INFO:
|
||||
typ = model.MetricTypeInfo
|
||||
case Metadata_METRIC_TYPE_STATESET:
|
||||
typ = model.MetricTypeStateset
|
||||
}
|
||||
return metadata.Metadata{
|
||||
Type: typ,
|
||||
Unit: symbols[m.Metadata.UnitRef],
|
||||
Help: symbols[m.Metadata.HelpRef],
|
||||
}
|
||||
}
|
||||
|
||||
// FromMetadataType transforms a Prometheus metricType into writev2 metricType.
|
||||
// Since the former is a string we need to transform it to an enum.
|
||||
func FromMetadataType(t model.MetricType) Metadata_MetricType {
|
||||
switch t {
|
||||
case model.MetricTypeCounter:
|
||||
return Metadata_METRIC_TYPE_COUNTER
|
||||
case model.MetricTypeGauge:
|
||||
return Metadata_METRIC_TYPE_GAUGE
|
||||
case model.MetricTypeHistogram:
|
||||
return Metadata_METRIC_TYPE_HISTOGRAM
|
||||
case model.MetricTypeGaugeHistogram:
|
||||
return Metadata_METRIC_TYPE_GAUGEHISTOGRAM
|
||||
case model.MetricTypeSummary:
|
||||
return Metadata_METRIC_TYPE_SUMMARY
|
||||
case model.MetricTypeInfo:
|
||||
return Metadata_METRIC_TYPE_INFO
|
||||
case model.MetricTypeStateset:
|
||||
return Metadata_METRIC_TYPE_STATESET
|
||||
default:
|
||||
return Metadata_METRIC_TYPE_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
// IsFloatHistogram returns true if the histogram is float.
|
||||
func (h Histogram) IsFloatHistogram() bool {
|
||||
_, ok := h.GetCount().(*Histogram_CountFloat)
|
||||
return ok
|
||||
}
|
||||
|
||||
// ToIntHistogram returns integer Prometheus histogram from the remote implementation
|
||||
// of integer histogram. If it's a float histogram, the method returns nil.
|
||||
func (h Histogram) ToIntHistogram() *histogram.Histogram {
|
||||
if h.IsFloatHistogram() {
|
||||
return nil
|
||||
}
|
||||
return &histogram.Histogram{
|
||||
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: h.GetZeroCountInt(),
|
||||
Count: h.GetCountInt(),
|
||||
Sum: h.Sum,
|
||||
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
|
||||
PositiveBuckets: h.GetPositiveDeltas(),
|
||||
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
|
||||
NegativeBuckets: h.GetNegativeDeltas(),
|
||||
CustomValues: h.GetCustomValues(),
|
||||
}
|
||||
}
|
||||
|
||||
// ToFloatHistogram returns float Prometheus histogram from the remote implementation
|
||||
// of float histogram. If the underlying implementation is an integer histogram, a
|
||||
// conversion is performed.
|
||||
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
|
||||
if h.IsFloatHistogram() {
|
||||
return &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: h.GetZeroCountFloat(),
|
||||
Count: h.GetCountFloat(),
|
||||
Sum: h.Sum,
|
||||
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
|
||||
PositiveBuckets: h.GetPositiveCounts(),
|
||||
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
|
||||
NegativeBuckets: h.GetNegativeCounts(),
|
||||
CustomValues: h.GetCustomValues(),
|
||||
}
|
||||
}
|
||||
// Conversion from integer histogram.
|
||||
return &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.CounterResetHint(h.ResetHint),
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: float64(h.GetZeroCountInt()),
|
||||
Count: float64(h.GetCountInt()),
|
||||
Sum: h.Sum,
|
||||
PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()),
|
||||
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
|
||||
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
|
||||
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
|
||||
CustomValues: h.GetCustomValues(),
|
||||
}
|
||||
}
|
||||
|
||||
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
|
||||
spans := make([]histogram.Span, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
||||
}
|
||||
|
||||
return spans
|
||||
}
|
||||
|
||||
func deltasToCounts(deltas []int64) []float64 {
|
||||
counts := make([]float64, len(deltas))
|
||||
var cur float64
|
||||
for i, d := range deltas {
|
||||
cur += float64(d)
|
||||
counts[i] = cur
|
||||
}
|
||||
return counts
|
||||
}
|
||||
|
||||
// FromIntHistogram returns remote Histogram from the integer Histogram.
|
||||
func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
|
||||
return Histogram{
|
||||
Count: &Histogram_CountInt{CountInt: h.Count},
|
||||
Sum: h.Sum,
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
|
||||
NegativeSpans: spansToSpansProto(h.NegativeSpans),
|
||||
NegativeDeltas: h.NegativeBuckets,
|
||||
PositiveSpans: spansToSpansProto(h.PositiveSpans),
|
||||
PositiveDeltas: h.PositiveBuckets,
|
||||
ResetHint: Histogram_ResetHint(h.CounterResetHint),
|
||||
CustomValues: h.CustomValues,
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
// FromFloatHistogram returns remote Histogram from the float Histogram.
|
||||
func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram {
|
||||
return Histogram{
|
||||
Count: &Histogram_CountFloat{CountFloat: fh.Count},
|
||||
Sum: fh.Sum,
|
||||
Schema: fh.Schema,
|
||||
ZeroThreshold: fh.ZeroThreshold,
|
||||
ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
|
||||
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
|
||||
NegativeCounts: fh.NegativeBuckets,
|
||||
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
|
||||
PositiveCounts: fh.PositiveBuckets,
|
||||
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
|
||||
CustomValues: fh.CustomValues,
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func spansToSpansProto(s []histogram.Span) []BucketSpan {
|
||||
spans := make([]BucketSpan, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
||||
}
|
||||
|
||||
return spans
|
||||
}
|
||||
|
||||
func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, symbols []string) exemplar.Exemplar {
|
||||
timestamp := m.Timestamp
|
||||
|
||||
return exemplar.Exemplar{
|
||||
Labels: desymbolizeLabels(b, m.LabelsRefs, symbols),
|
||||
Value: m.Value,
|
||||
Ts: timestamp,
|
||||
HasTs: timestamp != 0,
|
||||
}
|
||||
}
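In the v2 codec, labels and metadata are resolved through the request-level symbols
table instead of being carried inline. A small sketch of decoding a single series
under that scheme (the symbols and reference values below are invented for
illustration):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	symbols := []string{"", "__name__", "up", "job", "prometheus", "Help text.", "seconds"}

	ts := writev2.TimeSeries{
		LabelsRefs: []uint32{1, 2, 3, 4}, // {__name__="up", job="prometheus"}
		Metadata: writev2.Metadata{
			Type:    writev2.Metadata_METRIC_TYPE_GAUGE,
			HelpRef: 5,
			UnitRef: 6,
		},
	}

	b := labels.NewScratchBuilder(2)
	fmt.Println(ts.ToLabels(&b, symbols))    // {__name__="up", job="prometheus"}
	fmt.Println(ts.ToMetadata(symbols).Unit) // seconds
}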
|
165
prompb/io/prometheus/write/v2/custom.go
Normal file
|
@@ -0,0 +1,165 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package writev2
|
||||
|
||||
import (
|
||||
"slices"
|
||||
)
|
||||
|
||||
func (m Sample) T() int64 { return m.Timestamp }
|
||||
func (m Sample) V() float64 { return m.Value }
|
||||
|
||||
func (m *Request) OptimizedMarshal(dst []byte) ([]byte, error) {
|
||||
siz := m.Size()
|
||||
if cap(dst) < siz {
|
||||
dst = make([]byte, siz)
|
||||
}
|
||||
n, err := m.OptimizedMarshalToSizedBuffer(dst[:siz])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst[:n], nil
|
||||
}
|
||||
|
||||
// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
|
||||
// but calls OptimizedMarshalToSizedBuffer on the timeseries.
|
||||
func (m *Request) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.Timeseries) > 0 {
|
||||
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Timeseries[iNdEx].OptimizedMarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
}
|
||||
}
|
||||
if len(m.Symbols) > 0 {
|
||||
for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.Symbols[iNdEx])
|
||||
copy(dAtA[i:], m.Symbols[iNdEx])
|
||||
i = encodeVarintTypes(dAtA, i, uint64(len(m.Symbols[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
// OptimizedMarshalToSizedBuffer is mostly a copy of the generated MarshalToSizedBuffer,
|
||||
// but marshals m.LabelsRefs in place without extra allocations.
|
||||
func (m *TimeSeries) OptimizedMarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.CreatedTimestamp != 0 {
|
||||
i = encodeVarintTypes(dAtA, i, uint64(m.CreatedTimestamp))
|
||||
i--
|
||||
dAtA[i] = 0x30
|
||||
}
|
||||
{
|
||||
size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
if len(m.Histograms) > 0 {
|
||||
for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
}
|
||||
}
|
||||
if len(m.Exemplars) > 0 {
|
||||
for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
}
|
||||
if len(m.Samples) > 0 {
|
||||
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintTypes(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
|
||||
if len(m.LabelsRefs) > 0 {
|
||||
// This is the trick: encode the varints in reverse order to make it easier
|
||||
// to do it in place. Then reverse the whole thing.
|
||||
var j10 int
|
||||
start := i
|
||||
for _, num := range m.LabelsRefs {
|
||||
for num >= 1<<7 {
|
||||
dAtA[i-1] = uint8(uint64(num)&0x7f | 0x80)
|
||||
num >>= 7
|
||||
i--
|
||||
j10++
|
||||
}
|
||||
dAtA[i-1] = uint8(num)
|
||||
i--
|
||||
j10++
|
||||
}
|
||||
slices.Reverse(dAtA[i:start])
|
||||
// --- end of trick
|
||||
|
||||
i = encodeVarintTypes(dAtA, i, uint64(j10))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
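The packed-varint part above writes each value's byte groups backwards into the tail
of the buffer and then reverses that whole region, which yields exactly the same bytes
as a plain forward encoding. A standalone sketch of just that trick, checked against
encoding/binary (the input values are arbitrary):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"slices"
)

// encodeReversed packs nums as varints by writing backwards from the end of a
// scratch buffer and reversing the written region, mirroring the in-place trick.
func encodeReversed(nums []uint32) []byte {
	buf := make([]byte, 64) // Large enough for this example.
	i := len(buf)
	start := i
	for _, num := range nums {
		for num >= 1<<7 {
			i--
			buf[i] = uint8(num&0x7f | 0x80)
			num >>= 7
		}
		i--
		buf[i] = uint8(num)
	}
	slices.Reverse(buf[i:start])
	return buf[i:]
}

func main() {
	nums := []uint32{1, 300, 70000}

	// Reference: straightforward forward encoding.
	var fwd []byte
	for _, n := range nums {
		fwd = binary.AppendUvarint(fwd, uint64(n))
	}

	fmt.Println(bytes.Equal(fwd, encodeReversed(nums))) // true
}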
|
97
prompb/io/prometheus/write/v2/custom_test.go
Normal file
|
@@ -0,0 +1,97 @@
|
|||
// Copyright 2023 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package writev2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestOptimizedMarshal(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
m *Request
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
m: &Request{},
|
||||
},
|
||||
{
|
||||
name: "simple",
|
||||
m: &Request{
|
||||
Timeseries: []TimeSeries{
|
||||
{
|
||||
LabelsRefs: []uint32{
|
||||
0, 1,
|
||||
2, 3,
|
||||
4, 5,
|
||||
6, 7,
|
||||
8, 9,
|
||||
10, 11,
|
||||
12, 13,
|
||||
14, 15,
|
||||
},
|
||||
|
||||
Samples: []Sample{{Value: 1, Timestamp: 0}},
|
||||
Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 1, Timestamp: 0}},
|
||||
Histograms: nil,
|
||||
},
|
||||
{
|
||||
LabelsRefs: []uint32{
|
||||
0, 1,
|
||||
2, 3,
|
||||
4, 5,
|
||||
6, 7,
|
||||
8, 9,
|
||||
10, 11,
|
||||
12, 13,
|
||||
14, 15,
|
||||
},
|
||||
Samples: []Sample{{Value: 2, Timestamp: 1}},
|
||||
Exemplars: []Exemplar{{LabelsRefs: []uint32{0, 1}, Value: 2, Timestamp: 1}},
|
||||
Histograms: nil,
|
||||
},
|
||||
},
|
||||
Symbols: []string{
|
||||
"a", "b",
|
||||
"c", "d",
|
||||
"e", "f",
|
||||
"g", "h",
|
||||
"i", "j",
|
||||
"k", "l",
|
||||
"m", "n",
|
||||
"o", "p",
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Keep the slice allocated to mimic what std Marshal
|
||||
// would give to sized Marshal.
|
||||
got := make([]byte, 0)
|
||||
|
||||
// Should be the same as the standard marshal.
|
||||
expected, err := tt.m.Marshal()
|
||||
require.NoError(t, err)
|
||||
got, err = tt.m.OptimizedMarshal(got)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, got)
|
||||
|
||||
// Unmarshal should work too.
|
||||
m := &Request{}
|
||||
require.NoError(t, m.Unmarshal(got))
|
||||
require.Equal(t, tt.m, m)
|
||||
})
|
||||
}
|
||||
}
|
83
prompb/io/prometheus/write/v2/symbols.go
Normal file
|
@@ -0,0 +1,83 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package writev2
|
||||
|
||||
import "github.com/prometheus/prometheus/model/labels"
|
||||
|
||||
// SymbolsTable implements table for easy symbol use.
|
||||
type SymbolsTable struct {
|
||||
strings []string
|
||||
symbolsMap map[string]uint32
|
||||
}
|
||||
|
||||
// NewSymbolTable returns a symbol table.
|
||||
func NewSymbolTable() SymbolsTable {
|
||||
return SymbolsTable{
|
||||
// Empty string is required as a first element.
|
||||
symbolsMap: map[string]uint32{"": 0},
|
||||
strings: []string{""},
|
||||
}
|
||||
}
|
||||
|
||||
// Symbolize adds (if not added before) a string to the symbols table,
|
||||
// while returning its reference number.
|
||||
func (t *SymbolsTable) Symbolize(str string) uint32 {
|
||||
if ref, ok := t.symbolsMap[str]; ok {
|
||||
return ref
|
||||
}
|
||||
ref := uint32(len(t.strings))
|
||||
t.strings = append(t.strings, str)
|
||||
t.symbolsMap[str] = ref
|
||||
return ref
|
||||
}
|
||||
|
||||
// SymbolizeLabels symbolizes Prometheus labels.
|
||||
func (t *SymbolsTable) SymbolizeLabels(lbls labels.Labels, buf []uint32) []uint32 {
|
||||
result := buf[:0]
|
||||
lbls.Range(func(l labels.Label) {
|
||||
off := t.Symbolize(l.Name)
|
||||
result = append(result, off)
|
||||
off = t.Symbolize(l.Value)
|
||||
result = append(result, off)
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
// Symbols returns the computed symbols table to put in e.g. Request.Symbols.
|
||||
// As per spec, order does not matter.
|
||||
func (t *SymbolsTable) Symbols() []string {
|
||||
return t.strings
|
||||
}
|
||||
|
||||
// Reset clears symbols table.
|
||||
func (t *SymbolsTable) Reset() {
|
||||
// NOTE: Make sure to keep empty symbol.
|
||||
t.strings = t.strings[:1]
|
||||
for k := range t.symbolsMap {
|
||||
if k == "" {
|
||||
continue
|
||||
}
|
||||
delete(t.symbolsMap, k)
|
||||
}
|
||||
}
|
||||
|
||||
// desymbolizeLabels decodes label references, using the given symbols, into labels.
|
||||
func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
|
||||
b.Reset()
|
||||
for i := 0; i < len(labelRefs); i += 2 {
|
||||
b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
|
||||
}
|
||||
b.Sort()
|
||||
return b.Labels()
|
||||
}
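Putting the table to use: a sender interns each label name and value once, ships the
de-duplicated strings in Request.Symbols, and the series themselves carry only integer
references. A minimal sketch under that assumption (the series content is made up):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
)

func main() {
	st := writev2.NewSymbolTable()
	lbls := labels.FromStrings("__name__", "http_requests_total", "job", "api")

	series := writev2.TimeSeries{
		LabelsRefs: st.SymbolizeLabels(lbls, nil), // [1 2 3 4]
		Samples:    []writev2.Sample{{Value: 1, Timestamp: 1720000000000}},
	}
	req := &writev2.Request{
		Symbols:    st.Symbols(), // "", "__name__", "http_requests_total", "job", "api"
		Timeseries: []writev2.TimeSeries{series},
	}

	fmt.Println(req.Symbols, req.Timeseries[0].LabelsRefs)
}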
|
60
prompb/io/prometheus/write/v2/symbols_test.go
Normal file
|
@@ -0,0 +1,60 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package writev2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
func TestSymbolsTable(t *testing.T) {
|
||||
s := NewSymbolTable()
|
||||
require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
|
||||
require.Equal(t, uint32(0), s.Symbolize(""))
|
||||
require.Equal(t, []string{""}, s.Symbols())
|
||||
|
||||
require.Equal(t, uint32(1), s.Symbolize("abc"))
|
||||
require.Equal(t, []string{"", "abc"}, s.Symbols())
|
||||
|
||||
require.Equal(t, uint32(2), s.Symbolize("__name__"))
|
||||
require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols())
|
||||
|
||||
require.Equal(t, uint32(3), s.Symbolize("foo"))
|
||||
require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols())
|
||||
|
||||
s.Reset()
|
||||
require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist")
|
||||
require.Equal(t, uint32(0), s.Symbolize(""))
|
||||
|
||||
require.Equal(t, uint32(1), s.Symbolize("__name__"))
|
||||
require.Equal(t, []string{"", "__name__"}, s.Symbols())
|
||||
|
||||
require.Equal(t, uint32(2), s.Symbolize("abc"))
|
||||
require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols())
|
||||
|
||||
ls := labels.FromStrings("__name__", "qwer", "zxcv", "1234")
|
||||
encoded := s.SymbolizeLabels(ls, nil)
|
||||
require.Equal(t, []uint32{1, 3, 4, 5}, encoded)
|
||||
b := labels.NewScratchBuilder(len(encoded))
|
||||
decoded := desymbolizeLabels(&b, encoded, s.Symbols())
|
||||
require.Equal(t, ls, decoded)
|
||||
|
||||
// Different buf.
|
||||
ls = labels.FromStrings("__name__", "qwer", "zxcv2222", "1234")
|
||||
encoded = s.SymbolizeLabels(ls, []uint32{1, 3, 4, 5})
|
||||
require.Equal(t, []uint32{1, 3, 6, 5}, encoded)
|
||||
}
|
3241
prompb/io/prometheus/write/v2/types.pb.go
Normal file
File diff suppressed because it is too large
260
prompb/io/prometheus/write/v2/types.proto
Normal file
|
@@ -0,0 +1,260 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2
|
||||
|
||||
syntax = "proto3";
|
||||
package io.prometheus.write.v2;
|
||||
|
||||
option go_package = "writev2";
|
||||
|
||||
import "gogoproto/gogo.proto";
|
||||
|
||||
// Request represents a request to write the given timeseries to a remote destination.
|
||||
// This message was introduced in the Remote Write 2.0 specification:
|
||||
// https://prometheus.io/docs/concepts/remote_write_spec_2_0/
|
||||
//
|
||||
// The canonical Content-Type request header value for this message is
|
||||
// "application/x-protobuf;proto=io.prometheus.write.v2.Request"
|
||||
//
|
||||
// NOTE: gogoproto options might change in future for this file, they
|
||||
// are not part of the spec proto (they only modify the generated Go code, not
|
||||
// the serialized message). See: https://github.com/prometheus/prometheus/issues/11908
|
||||
message Request {
|
||||
// Since Request supersedes 1.0 spec's prometheus.WriteRequest, we reserve the top-down message
|
||||
// for the deterministic interop between those two, see types_test.go for details.
|
||||
// Generally it's not needed, because Receivers must use the Content-Type header, but we want to
|
||||
// be sympathetic to adopters with mistaken implementations and have deterministic error (empty
|
||||
// message if you use the wrong proto schema).
|
||||
reserved 1 to 3;
|
||||
|
||||
// symbols contains a de-duplicated array of string elements used for various
|
||||
// items in a Request message, like labels and metadata items. For the sender's convenience
|
||||
// around empty values for optional fields like unit_ref, symbols array MUST start with
|
||||
// empty string.
|
||||
//
|
||||
// To decode each of the symbolized strings, referenced, by "ref(s)" suffix, you
|
||||
// need to lookup the actual string by index from symbols array. The order of
|
||||
// strings is up to the sender. The receiver should not assume any particular encoding.
|
||||
repeated string symbols = 4;
|
||||
// timeseries represents an array of distinct series with 0 or more samples.
|
||||
repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// TimeSeries represents a single series.
|
||||
message TimeSeries {
|
||||
// labels_refs is a list of label name-value pair references, encoded
|
||||
// as indices to the Request.symbols array. This list's length is always
|
||||
// a multiple of two, and the underlying labels should be sorted lexicographically.
|
||||
//
|
||||
// Note that there might be multiple TimeSeries objects in the same
|
||||
// Requests with the same labels e.g. for different exemplars, metadata
|
||||
// or created timestamp.
|
||||
repeated uint32 labels_refs = 1;
|
||||
|
||||
// Timeseries messages can either specify samples or (native) histogram samples
|
||||
// (histogram field), but not both. For a typical sender (real-time metric
|
||||
// streaming), in healthy cases, there will be only one sample or histogram.
|
||||
//
|
||||
// Samples and histograms are sorted by timestamp (older first).
|
||||
repeated Sample samples = 2 [(gogoproto.nullable) = false];
|
||||
repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
|
||||
|
||||
// exemplars represents an optional set of exemplars attached to this series' samples.
|
||||
repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false];
|
||||
|
||||
// metadata represents the metadata associated with the given series' samples.
|
||||
Metadata metadata = 5 [(gogoproto.nullable) = false];
|
||||
|
||||
// created_timestamp represents an optional created timestamp associated with
|
||||
// this series' samples in ms format, typically for counter or histogram type
|
||||
// metrics. Created timestamp represents the time when the counter started
|
||||
// counting (sometimes referred to as start timestamp), which can increase
|
||||
// the accuracy of query results.
|
||||
//
|
||||
// Note that some receivers might require this and in return fail to
|
||||
// ingest such samples within the Request.
|
||||
//
|
||||
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
|
||||
// for conversion from/to time.Time to Prometheus timestamp.
|
||||
//
|
||||
// Note that the "optional" keyword is omitted due to
|
||||
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
|
||||
// Zero value means value not set. If you need to use exactly zero value for
|
||||
// the timestamp, use 1 millisecond before or after.
|
||||
int64 created_timestamp = 6;
|
||||
}
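As an illustration of the reference scheme (values invented for the example): with
symbols = ["", "__name__", "up", "job", "prometheus"], a TimeSeries whose labels_refs
is [1, 2, 3, 4] decodes to the label set {__name__="up", job="prometheus"}, while a
help_ref or unit_ref of 0 points at the mandatory leading empty string and simply
means "not set".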
|
||||
|
||||
// Exemplar is an additional information attached to some series' samples.
|
||||
// It is typically used to attach an example trace or request ID associated with
|
||||
// the metric changes.
|
||||
message Exemplar {
|
||||
// labels_refs is an optional list of label name-value pair references, encoded
|
||||
// as indices to the Request.symbols array. This list's len is always
|
||||
// a multiple of 2, and the underlying labels should be sorted lexicographically.
|
||||
// If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
|
||||
repeated uint32 labels_refs = 1;
|
||||
// value represents an exact example value. This can be useful when the exemplar
|
||||
// is attached to a histogram, which only gives an estimated value through buckets.
|
||||
double value = 2;
|
||||
// timestamp represents an optional timestamp of the sample in ms.
|
||||
//
|
||||
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
|
||||
// for conversion from/to time.Time to Prometheus timestamp.
|
||||
//
|
||||
// Note that the "optional" keyword is omitted due to
|
||||
// https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
|
||||
// Zero value means value not set. If you need to use exactly zero value for
|
||||
// the timestamp, use 1 millisecond before or after.
|
||||
int64 timestamp = 3;
|
||||
}
|
||||
|
||||
// Sample represents series sample.
|
||||
message Sample {
|
||||
// value of the sample.
|
||||
double value = 1;
|
||||
// timestamp represents timestamp of the sample in ms.
|
||||
//
|
||||
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
|
||||
// for conversion from/to time.Time to Prometheus timestamp.
|
||||
int64 timestamp = 2;
|
||||
}
|
||||
|
||||
// Metadata represents the metadata associated with the given series' samples.
|
||||
message Metadata {
|
||||
enum MetricType {
|
||||
METRIC_TYPE_UNSPECIFIED = 0;
|
||||
METRIC_TYPE_COUNTER = 1;
|
||||
METRIC_TYPE_GAUGE = 2;
|
||||
METRIC_TYPE_HISTOGRAM = 3;
|
||||
METRIC_TYPE_GAUGEHISTOGRAM = 4;
|
||||
METRIC_TYPE_SUMMARY = 5;
|
||||
METRIC_TYPE_INFO = 6;
|
||||
METRIC_TYPE_STATESET = 7;
|
||||
}
|
||||
MetricType type = 1;
|
||||
// help_ref is a reference to the Request.symbols array representing help
|
||||
// text for the metric. Help is optional, reference should point to an empty string in
|
||||
// such a case.
|
||||
uint32 help_ref = 3;
|
||||
// unit_ref is a reference to the Request.symbols array representing a unit
|
||||
// for the metric. Unit is optional, reference should point to an empty string in
|
||||
// such a case.
|
||||
uint32 unit_ref = 4;
|
||||
}
|
||||
|
||||
// A native histogram, also known as a sparse histogram.
|
||||
// Original design doc:
|
||||
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
|
||||
// The appendix of this design doc also explains the concept of float
|
||||
// histograms. This Histogram message can represent both, the usual
|
||||
// integer histogram as well as a float histogram.
|
||||
message Histogram {
|
||||
enum ResetHint {
|
||||
RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly.
|
||||
RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset.
|
||||
RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram.
|
||||
RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen.
|
||||
}
|
||||
|
||||
oneof count { // Count of observations in the histogram.
|
||||
uint64 count_int = 1;
|
||||
double count_float = 2;
|
||||
}
|
||||
double sum = 3; // Sum of observations in the histogram.
|
||||
|
||||
// The schema defines the bucket schema. Currently, valid numbers
|
||||
// are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be
|
||||
// added in future for new bucketing layouts.
|
||||
//
|
||||
// The schema equal to -53 means custom buckets. See
|
||||
// custom_values field description for more details.
|
||||
//
|
||||
// Values between -4 and 8 represent base-2 bucket schema, where 1
|
||||
// is a bucket boundary in each case, and then each power of two is
|
||||
// divided into 2^n (n is schema value) logarithmic buckets. Or in other words,
|
||||
// each bucket boundary is the previous boundary times 2^(2^-n).
|
||||
sint32 schema = 4;
|
||||
double zero_threshold = 5; // Breadth of the zero bucket.
|
||||
oneof zero_count { // Count in zero bucket.
|
||||
uint64 zero_count_int = 6;
|
||||
double zero_count_float = 7;
|
||||
}
|
||||
|
||||
// Negative Buckets.
|
||||
repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
|
||||
// Use either "negative_deltas" or "negative_counts", the former for
|
||||
// regular histograms with integer counts, the latter for
|
||||
// float histograms.
|
||||
repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
|
||||
repeated double negative_counts = 10; // Absolute count of each bucket.
|
||||
|
||||
// Positive Buckets.
|
||||
//
|
||||
// In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows:
|
||||
// * The span offset+length points to the index of the custom_values array
|
||||
// or +Inf if pointing to the len of the array.
|
||||
// * The counts and deltas have the same meaning as for exponential histograms.
|
||||
repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
|
||||
// Use either "positive_deltas" or "positive_counts", the former for
|
||||
// regular histograms with integer counts, the latter for
|
||||
// float histograms.
|
||||
repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
|
||||
repeated double positive_counts = 13; // Absolute count of each bucket.
|
||||
|
||||
ResetHint reset_hint = 14;
|
||||
// timestamp represents timestamp of the sample in ms.
|
||||
//
|
||||
// For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
|
||||
// for conversion from/to time.Time to Prometheus timestamp.
|
||||
int64 timestamp = 15;
|
||||
|
||||
// custom_values is an additional field used by non-exponential bucketing layouts.
|
||||
//
|
||||
// For custom buckets (-53 schema value) custom_values specify monotonically
|
||||
// increasing upper inclusive boundaries for the bucket counts with arbitrary
|
||||
// widths for this histogram. In other words, custom_values represents custom,
|
||||
// explicit bucketing that could have been converted from the classic histograms.
|
||||
//
|
||||
// Those bounds are then referenced by spans in positive_spans with corresponding positive
|
||||
// counts of deltas (refer to positive_spans for more details). This way we can
|
||||
// have encode sparse histograms with custom bucketing (many buckets are often
|
||||
// not used).
|
||||
//
|
||||
// Note that for custom bounds, even negative observations are placed in the positive
|
||||
// counts to simplify the implementation and avoid ambiguity of where to place
|
||||
// an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and
|
||||
// the zero bucket are unused, if the schema indicates custom bucketing.
|
||||
//
|
||||
// For each upper boundary the previous boundary represents the lower exclusive
|
||||
// boundary for that bucket. The first element is the upper inclusive boundary
|
||||
// for the first bucket, which implicitly has a lower inclusive bound of -Inf.
|
||||
// This is similar to "le" label semantics on classic histograms. You may add a
|
||||
// bucket with an upper bound of 0 to make sure that you really have no negative
|
||||
// observations, but in practice, native histogram rendering will show both with
|
||||
// or without first upper boundary 0 and no negative counts as the same case.
|
||||
//
|
||||
// The last element is not only the upper inclusive bound of the last regular
|
||||
// bucket, but implicitly the lower exclusive bound of the +Inf bucket.
|
||||
repeated double custom_values = 16;
|
||||
}
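The schema and custom_values semantics above are easiest to see with concrete numbers.
A small standalone sketch (example values only, not taken from the spec) of the base-2
growth factor and of how custom bounds and deltas are read:

package main

import (
	"fmt"
	"math"
)

func main() {
	// For a base-2 schema n, each bucket boundary is the previous one times 2^(2^-n).
	const schema = 3
	growth := math.Pow(2, math.Pow(2, -schema))

	bound := 1.0
	for i := 0; i < 4; i++ {
		fmt.Printf("upper bound %d: %g\n", i, bound)
		bound *= growth
	}

	// Delta encoding: positive_deltas [1, 2, -2] expand to absolute counts [1, 3, 1].

	// For schema -53 (custom buckets), boundaries come from custom_values instead,
	// e.g. custom_values = [0.1, 0.5, 1] describes buckets
	// (-Inf, 0.1], (0.1, 0.5], (0.5, 1] plus an implicit (1, +Inf] bucket.
}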
|
||||
|
||||
// A BucketSpan defines a number of consecutive buckets with their
|
||||
// offset. Logically, it would be more straightforward to include the
|
||||
// bucket counts in the Span. However, the protobuf representation is
|
||||
// more compact in the way the data is structured here (with all the
|
||||
// buckets in a single array separate from the Spans).
|
||||
message BucketSpan {
|
||||
sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
|
||||
uint32 length = 2; // Length of consecutive buckets.
|
||||
}
|
97
prompb/io/prometheus/write/v2/types_test.go
Normal file
|
@@ -0,0 +1,97 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package writev2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
)
|
||||
|
||||
func TestInteropV2UnmarshalWithV1_DeterministicEmpty(t *testing.T) {
|
||||
expectedV1Empty := &prompb.WriteRequest{}
|
||||
for _, tc := range []struct{ incoming *Request }{
|
||||
{
|
||||
incoming: &Request{}, // Technically wrong, should be at least empty string in symbol.
|
||||
},
|
||||
{
|
||||
incoming: &Request{
|
||||
Symbols: []string{""},
|
||||
}, // NOTE: Without reserved fields, failed with "corrupted" ghost TimeSeries element.
|
||||
},
|
||||
{
|
||||
incoming: &Request{
|
||||
Symbols: []string{"", "__name__", "metric1"},
|
||||
Timeseries: []TimeSeries{
|
||||
{LabelsRefs: []uint32{1, 2}},
|
||||
{Samples: []Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}}},
|
||||
}, // NOTE: Without reserved fields, proto: illegal wireType 7
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
in, err := proto.Marshal(tc.incoming)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test accidental unmarshal of v2 payload with v1 proto.
|
||||
out := &prompb.WriteRequest{}
|
||||
require.NoError(t, proto.Unmarshal(in, out))
|
||||
|
||||
// Drop unknowns, we expect them when incoming payload had some fields.
|
||||
// This field & method will be likely gone after gogo removal.
|
||||
out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
|
||||
|
||||
require.Equal(t, expectedV1Empty, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInteropV1UnmarshalWithV2_DeterministicEmpty(t *testing.T) {
|
||||
expectedV2Empty := &Request{}
|
||||
for _, tc := range []struct{ incoming *prompb.WriteRequest }{
|
||||
{
|
||||
incoming: &prompb.WriteRequest{},
|
||||
},
|
||||
{
|
||||
incoming: &prompb.WriteRequest{
|
||||
Timeseries: []prompb.TimeSeries{
|
||||
{
|
||||
Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}},
|
||||
Samples: []prompb.Sample{{Value: 21.4, Timestamp: time.Now().UnixMilli()}},
|
||||
},
|
||||
},
|
||||
},
|
||||
// NOTE: Without reserved fields, results in corrupted v2.Request.Symbols.
|
||||
},
|
||||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
in, err := proto.Marshal(tc.incoming)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test accidental unmarshal of v1 payload with v2 proto.
|
||||
out := &Request{}
|
||||
require.NoError(t, proto.Unmarshal(in, out))
|
||||
|
||||
// Drop unknowns, we expect them when incoming payload had some fields.
|
||||
// This field & method will be likely gone after gogo removal.
|
||||
out.XXX_unrecognized = nil // NOTE: out.XXX_DiscardUnknown() does not work with nullables.
|
||||
|
||||
require.Equal(t, expectedV2Empty, out)
|
||||
})
|
||||
}
|
||||
}
|
298
prompb/rwcommon/codec_test.go
Normal file
|
@@ -0,0 +1,298 @@
|
|||
// Copyright 2024 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rwcommon
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
|
||||
)
|
||||
|
||||
func TestToLabels(t *testing.T) {
|
||||
expected := labels.FromStrings("__name__", "metric1", "foo", "bar")
|
||||
|
||||
t.Run("v1", func(t *testing.T) {
|
||||
ts := prompb.TimeSeries{Labels: []prompb.Label{{Name: "__name__", Value: "metric1"}, {Name: "foo", Value: "bar"}}}
|
||||
b := labels.NewScratchBuilder(2)
|
||||
require.Equal(t, expected, ts.ToLabels(&b, nil))
|
||||
require.Equal(t, ts.Labels, prompb.FromLabels(expected, nil))
|
||||
require.Equal(t, ts.Labels, prompb.FromLabels(expected, ts.Labels))
|
||||
})
|
||||
t.Run("v2", func(t *testing.T) {
|
||||
v2Symbols := []string{"", "__name__", "metric1", "foo", "bar"}
|
||||
ts := writev2.TimeSeries{LabelsRefs: []uint32{1, 2, 3, 4}}
|
||||
b := labels.NewScratchBuilder(2)
|
||||
require.Equal(t, expected, ts.ToLabels(&b, v2Symbols))
|
||||
// No need for FromLabels in our prod code as we use symbol table to do so.
|
||||
})
|
||||
}
|
||||
|
||||
func TestFromMetadataType(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input model.MetricType
|
||||
expectedV1 prompb.MetricMetadata_MetricType
|
||||
expectedV2 writev2.Metadata_MetricType
|
||||
}{
|
||||
{
|
||||
desc: "with a single-word metric",
|
||||
input: model.MetricTypeCounter,
|
||||
expectedV1: prompb.MetricMetadata_COUNTER,
|
||||
expectedV2: writev2.Metadata_METRIC_TYPE_COUNTER,
|
||||
},
|
||||
{
|
||||
desc: "with a two-word metric",
|
||||
input: model.MetricTypeStateset,
|
||||
expectedV1: prompb.MetricMetadata_STATESET,
|
||||
expectedV2: writev2.Metadata_METRIC_TYPE_STATESET,
|
||||
},
|
||||
{
|
||||
desc: "with an unknown metric",
|
||||
input: "not-known",
|
||||
expectedV1: prompb.MetricMetadata_UNKNOWN,
|
||||
expectedV2: writev2.Metadata_METRIC_TYPE_UNSPECIFIED,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
t.Run("v1", func(t *testing.T) {
|
||||
require.Equal(t, tc.expectedV1, prompb.FromMetadataType(tc.input))
|
||||
})
|
||||
t.Run("v2", func(t *testing.T) {
|
||||
require.Equal(t, tc.expectedV2, writev2.FromMetadataType(tc.input))
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToMetadata(t *testing.T) {
|
||||
sym := writev2.NewSymbolTable()
|
||||
|
||||
for _, tc := range []struct {
|
||||
input writev2.Metadata
|
||||
expected metadata.Metadata
|
||||
}{
|
||||
{
|
||||
input: writev2.Metadata{},
|
||||
expected: metadata.Metadata{
|
||||
Type: model.MetricTypeUnknown,
|
||||
},
|
||||
},
|
||||
{
|
||||
input: writev2.Metadata{
|
||||
Type: 12414, // Unknown.
|
||||
},
|
||||
expected: metadata.Metadata{
|
||||
Type: model.MetricTypeUnknown,
|
||||
},
|
||||
},
|
||||
{
|
||||
input: writev2.Metadata{
|
||||
Type: writev2.Metadata_METRIC_TYPE_COUNTER,
|
||||
HelpRef: sym.Symbolize("help1"),
|
||||
UnitRef: sym.Symbolize("unit1"),
|
||||
},
|
||||
expected: metadata.Metadata{
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "help1",
|
||||
Unit: "unit1",
|
||||
},
|
||||
},
|
||||
{
|
||||
input: writev2.Metadata{
|
||||
Type: writev2.Metadata_METRIC_TYPE_STATESET,
|
||||
HelpRef: sym.Symbolize("help2"),
|
||||
},
|
||||
expected: metadata.Metadata{
|
||||
Type: model.MetricTypeStateset,
|
||||
Help: "help2",
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
ts := writev2.TimeSeries{Metadata: tc.input}
|
||||
require.Equal(t, tc.expected, ts.ToMetadata(sym.Symbols()))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToHistogram_Empty(t *testing.T) {
	t.Run("v1", func(t *testing.T) {
		require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
		require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
	})
	t.Run("v2", func(t *testing.T) {
		require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
		require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
	})
}

// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testIntHistogram() histogram.Histogram {
	return histogram.Histogram{
		CounterResetHint: histogram.GaugeType,
		Schema:           1,
		Count:            19,
		Sum:              2.7,
		ZeroThreshold:    1e-128,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 4},
			{Offset: 0, Length: 0},
			{Offset: 0, Length: 3},
		},
		PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
		NegativeSpans: []histogram.Span{
			{Offset: 0, Length: 5},
			{Offset: 1, Length: 0},
			{Offset: 0, Length: 1},
		},
		NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
		CustomValues:    []float64{21421, 523},
	}
}

// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testFloatHistogram() histogram.FloatHistogram {
	return histogram.FloatHistogram{
		CounterResetHint: histogram.GaugeType,
		Schema:           1,
		Count:            19,
		Sum:              2.7,
		ZeroThreshold:    1e-128,
		PositiveSpans: []histogram.Span{
			{Offset: 0, Length: 4},
			{Offset: 0, Length: 0},
			{Offset: 0, Length: 3},
		},
		PositiveBuckets: []float64{1, 3, 1, 2, 1, 1, 1},
		NegativeSpans: []histogram.Span{
			{Offset: 0, Length: 5},
			{Offset: 1, Length: 0},
			{Offset: 0, Length: 1},
		},
		NegativeBuckets: []float64{1, 3, 1, 2, 1, 1},
		CustomValues:    []float64{21421, 523},
	}
}

func TestFromIntToFloatOrIntHistogram(t *testing.T) {
	t.Run("v1", func(t *testing.T) {
		// v1 does not support nhcb.
		testIntHistWithoutNHCB := testIntHistogram()
		testIntHistWithoutNHCB.CustomValues = nil
		testFloatHistWithoutNHCB := testFloatHistogram()
		testFloatHistWithoutNHCB.CustomValues = nil

		h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB)
		require.False(t, h.IsFloatHistogram())
		require.Equal(t, int64(123), h.Timestamp)
		require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram())
		require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
	})
	t.Run("v2", func(t *testing.T) {
		testIntHist := testIntHistogram()
		testFloatHist := testFloatHistogram()

		h := writev2.FromIntHistogram(123, &testIntHist)
		require.False(t, h.IsFloatHistogram())
		require.Equal(t, int64(123), h.Timestamp)
		require.Equal(t, testIntHist, *h.ToIntHistogram())
		require.Equal(t, testFloatHist, *h.ToFloatHistogram())
	})
}

func TestFromFloatToFloatHistogram(t *testing.T) {
	t.Run("v1", func(t *testing.T) {
		// v1 does not support nhcb.
		testFloatHistWithoutNHCB := testFloatHistogram()
		testFloatHistWithoutNHCB.CustomValues = nil

		h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB)
		require.True(t, h.IsFloatHistogram())
		require.Equal(t, int64(123), h.Timestamp)
		require.Nil(t, h.ToIntHistogram())
		require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
	})
	t.Run("v2", func(t *testing.T) {
		testFloatHist := testFloatHistogram()

		h := writev2.FromFloatHistogram(123, &testFloatHist)
		require.True(t, h.IsFloatHistogram())
		require.Equal(t, int64(123), h.Timestamp)
		require.Nil(t, h.ToIntHistogram())
		require.Equal(t, testFloatHist, *h.ToFloatHistogram())
	})
}

func TestFromIntOrFloatHistogram_ResetHint(t *testing.T) {
	for _, tc := range []struct {
		input      histogram.CounterResetHint
		expectedV1 prompb.Histogram_ResetHint
		expectedV2 writev2.Histogram_ResetHint
	}{
		{
			input:      histogram.UnknownCounterReset,
			expectedV1: prompb.Histogram_UNKNOWN,
			expectedV2: writev2.Histogram_RESET_HINT_UNSPECIFIED,
		},
		{
			input:      histogram.CounterReset,
			expectedV1: prompb.Histogram_YES,
			expectedV2: writev2.Histogram_RESET_HINT_YES,
		},
		{
			input:      histogram.NotCounterReset,
			expectedV1: prompb.Histogram_NO,
			expectedV2: writev2.Histogram_RESET_HINT_NO,
		},
		{
			input:      histogram.GaugeType,
			expectedV1: prompb.Histogram_GAUGE,
			expectedV2: writev2.Histogram_RESET_HINT_GAUGE,
		},
	} {
		t.Run("", func(t *testing.T) {
			t.Run("v1", func(t *testing.T) {
				h := testIntHistogram()
				h.CounterResetHint = tc.input
				got := prompb.FromIntHistogram(1337, &h)
				require.Equal(t, tc.expectedV1, got.GetResetHint())

				fh := testFloatHistogram()
				fh.CounterResetHint = tc.input
				got2 := prompb.FromFloatHistogram(1337, &fh)
				require.Equal(t, tc.expectedV1, got2.GetResetHint())
			})
			t.Run("v2", func(t *testing.T) {
				h := testIntHistogram()
				h.CounterResetHint = tc.input
				got := writev2.FromIntHistogram(1337, &h)
				require.Equal(t, tc.expectedV2, got.GetResetHint())

				fh := testFloatHistogram()
				fh.CounterResetHint = tc.input
				got2 := writev2.FromFloatHistogram(1337, &fh)
				require.Equal(t, tc.expectedV2, got2.GetResetHint())
			})
		})
	}
}
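The int fixtures above store bucket values as deltas, while the float fixtures store absolute counts; the running sum of {1, 2, -2, 1, -1, 0, 0} is exactly {1, 3, 1, 2, 1, 1, 1}. A minimal standalone sketch of that delta-to-count step (mirroring the deltasToCounts helper removed from the remote codec further down; this toy program is illustrative, not part of the change):

package main

import "fmt"

// deltasToCounts turns delta-encoded integer bucket values into absolute
// float counts by keeping a running sum, which is how an integer histogram's
// buckets map onto a float histogram's buckets.
func deltasToCounts(deltas []int64) []float64 {
	counts := make([]float64, len(deltas))
	var cur float64
	for i, d := range deltas {
		cur += float64(d)
		counts[i] = cur
	}
	return counts
}

func main() {
	// Matches the fixtures above: testIntHistogram -> testFloatHistogram.
	fmt.Println(deltasToCounts([]int64{1, 2, -2, 1, -1, 0, 0})) // [1 3 1 2 1 1 1]
	fmt.Println(deltasToCounts([]int64{1, 2, -2, 1, -1, 0}))    // [1 3 1 2 1 1]
}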
@ -187,6 +187,21 @@ func rangeQueryCases() []benchCase {
|
|||
{
|
||||
expr: "topk(5, a_X)",
|
||||
},
|
||||
{
|
||||
expr: "limitk(1, a_X)",
|
||||
},
|
||||
{
|
||||
expr: "limitk(5, a_X)",
|
||||
},
|
||||
{
|
||||
expr: "limit_ratio(0.1, a_X)",
|
||||
},
|
||||
{
|
||||
expr: "limit_ratio(0.5, a_X)",
|
||||
},
|
||||
{
|
||||
expr: "limit_ratio(-0.5, a_X)",
|
||||
},
|
||||
// Combinations.
|
||||
{
|
||||
expr: "rate(a_X[1m]) + rate(b_X[1m])",
|
||||
|
|
promql/engine.go (152 lines changed)
|
@ -1324,7 +1324,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
|
|||
index, ok := groupToResultIndex[groupingKey]
|
||||
// Add a new group if it doesn't exist.
|
||||
if !ok {
|
||||
if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK {
|
||||
if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK && aggExpr.Op != parser.LIMITK && aggExpr.Op != parser.LIMIT_RATIO {
|
||||
m := generateGroupingLabels(enh, series.Metric, aggExpr.Without, sortedGrouping)
|
||||
result = append(result, Series{Metric: m})
|
||||
}
|
||||
|
@ -1337,9 +1337,10 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
|
|||
groups := make([]groupedAggregation, groupCount)
|
||||
|
||||
var k int
|
||||
var ratio float64
|
||||
var seriess map[uint64]Series
|
||||
switch aggExpr.Op {
|
||||
case parser.TOPK, parser.BOTTOMK:
|
||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK:
|
||||
if !convertibleToInt64(param) {
|
||||
ev.errorf("Scalar value %v overflows int64", param)
|
||||
}
|
||||
|
@ -1351,6 +1352,23 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
|
|||
return nil, warnings
|
||||
}
|
||||
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
|
||||
case parser.LIMIT_RATIO:
|
||||
if math.IsNaN(param) {
|
||||
ev.errorf("Ratio value %v is NaN", param)
|
||||
}
|
||||
switch {
|
||||
case param == 0:
|
||||
return nil, warnings
|
||||
case param < -1.0:
|
||||
ratio = -1.0
|
||||
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
|
||||
case param > 1.0:
|
||||
ratio = 1.0
|
||||
warnings.Add(annotations.NewInvalidRatioWarning(param, ratio, aggExpr.Param.PositionRange()))
|
||||
default:
|
||||
ratio = param
|
||||
}
|
||||
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
|
||||
case parser.QUANTILE:
|
||||
if math.IsNaN(param) || param < 0 || param > 1 {
|
||||
warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange()))
|
||||
|
@ -1368,11 +1386,12 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
|
|||
enh.Ts = ts
|
||||
var ws annotations.Annotations
|
||||
switch aggExpr.Op {
|
||||
case parser.TOPK, parser.BOTTOMK:
|
||||
result, ws = ev.aggregationK(aggExpr, k, inputMatrix, seriesToResult, groups, enh, seriess)
|
||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO:
|
||||
result, ws = ev.aggregationK(aggExpr, k, ratio, inputMatrix, seriesToResult, groups, enh, seriess)
|
||||
// If this could be an instant query, shortcut so as not to change sort order.
|
||||
if ev.endTimestamp == ev.startTimestamp {
|
||||
return result, ws
|
||||
warnings.Merge(ws)
|
||||
return result, warnings
|
||||
}
|
||||
default:
|
||||
ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh)
|
||||
|
@ -1387,7 +1406,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping
|
|||
|
||||
// Assemble the output matrix. By the time we get here we know we don't have too many samples.
|
||||
switch aggExpr.Op {
|
||||
case parser.TOPK, parser.BOTTOMK:
|
||||
case parser.TOPK, parser.BOTTOMK, parser.LIMITK, parser.LIMIT_RATIO:
|
||||
result = make(Matrix, 0, len(seriess))
|
||||
for _, ss := range seriess {
|
||||
result = append(result, ss)
|
||||
|
@ -2760,14 +2779,15 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
|
|||
}
|
||||
|
||||
type groupedAggregation struct {
|
||||
seen bool // Was this output groups seen in the input at this timestamp.
|
||||
hasFloat bool // Has at least 1 float64 sample aggregated.
|
||||
hasHistogram bool // Has at least 1 histogram sample aggregated.
|
||||
floatValue float64
|
||||
histogramValue *histogram.FloatHistogram
|
||||
floatMean float64 // Mean, or "compensating value" for Kahan summation.
|
||||
groupCount int
|
||||
heap vectorByValueHeap
|
||||
seen bool // Was this output group seen in the input at this timestamp.
|
||||
hasFloat bool // Has at least 1 float64 sample aggregated.
|
||||
hasHistogram bool // Has at least 1 histogram sample aggregated.
|
||||
floatValue float64
|
||||
histogramValue *histogram.FloatHistogram
|
||||
floatMean float64 // Mean, or "compensating value" for Kahan summation.
|
||||
groupCount int
|
||||
groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group
|
||||
heap vectorByValueHeap
|
||||
}
|
||||
|
||||
// aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
|
||||
|
@ -2964,19 +2984,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
|
|||
return annos
|
||||
}
|
||||
|
||||
// aggregationK evaluates topk or bottomk at one timestep on inputMatrix.
|
||||
// aggregationK evaluates topk, bottomk, limitk, or limit_ratio at one timestep on inputMatrix.
|
||||
// Output that has the same labels as the input, but just k of them per group.
|
||||
// seriesToResult maps inputMatrix indexes to groups indexes.
|
||||
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk.
|
||||
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio.
|
||||
// For a range query, aggregates output in the seriess map.
|
||||
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
|
||||
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
|
||||
op := e.Op
|
||||
var s Sample
|
||||
var annos annotations.Annotations
|
||||
// Used to short-cut the loop for LIMITK if we already collected k elements for every group
|
||||
groupsRemaining := len(groups)
|
||||
for i := range groups {
|
||||
groups[i].seen = false
|
||||
}
|
||||
|
||||
seriesLoop:
|
||||
for si := range inputMatrix {
|
||||
f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si])
|
||||
if !ok {
|
||||
|
@ -2987,11 +3010,23 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma
|
|||
group := &groups[seriesToResult[si]]
|
||||
// Initialize this group if it's the first time we've seen it.
|
||||
if !group.seen {
|
||||
*group = groupedAggregation{
|
||||
seen: true,
|
||||
heap: make(vectorByValueHeap, 1, k),
|
||||
// LIMIT_RATIO is a special case, as we may not add this very sample to the heap,
|
||||
// while we also don't know the final size of it.
|
||||
if op == parser.LIMIT_RATIO {
|
||||
*group = groupedAggregation{
|
||||
seen: true,
|
||||
heap: make(vectorByValueHeap, 0),
|
||||
}
|
||||
if ratiosampler.AddRatioSample(r, &s) {
|
||||
heap.Push(&group.heap, &s)
|
||||
}
|
||||
} else {
|
||||
*group = groupedAggregation{
|
||||
seen: true,
|
||||
heap: make(vectorByValueHeap, 1, k),
|
||||
}
|
||||
group.heap[0] = s
|
||||
}
|
||||
group.heap[0] = s
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -3022,6 +3057,26 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma
|
|||
}
|
||||
}
|
||||
|
||||
case parser.LIMITK:
|
||||
if len(group.heap) < k {
|
||||
heap.Push(&group.heap, &s)
|
||||
}
|
||||
// LIMITK optimization: early break if we've added K elem to _every_ group,
|
||||
// especially useful for large timeseries where the user is exploring labels via e.g.
|
||||
// limitk(10, my_metric)
|
||||
if !group.groupAggrComplete && len(group.heap) == k {
|
||||
group.groupAggrComplete = true
|
||||
groupsRemaining--
|
||||
if groupsRemaining == 0 {
|
||||
break seriesLoop
|
||||
}
|
||||
}
|
||||
|
||||
case parser.LIMIT_RATIO:
|
||||
if ratiosampler.AddRatioSample(r, &s) {
|
||||
heap.Push(&group.heap, &s)
|
||||
}
|
||||
|
||||
default:
|
||||
panic(fmt.Errorf("expected aggregation operator but got %q", op))
|
||||
}
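The LIMITK branch above relies on the groupsRemaining counter plus the labeled seriesLoop to stop scanning input series once every output group holds k elements. A simplified, self-contained sketch of that bookkeeping pattern (the group type and slices here are stand-ins, not the engine's real groupedAggregation machinery):

package main

import "fmt"

// group is a simplified stand-in for groupedAggregation: it only tracks the
// collected samples and whether the group already reached k of them.
type group struct {
	samples  []float64
	complete bool
}

// limitK keeps at most k samples per group and breaks out of the series loop
// as soon as every group is complete, mirroring the early-break optimization.
func limitK(k int, seriesToGroup []int, values []float64, groups []group) {
	remaining := len(groups)
seriesLoop:
	for i, v := range values {
		g := &groups[seriesToGroup[i]]
		if len(g.samples) < k {
			g.samples = append(g.samples, v)
		}
		if !g.complete && len(g.samples) == k {
			g.complete = true
			remaining--
			if remaining == 0 {
				break seriesLoop // Every group already holds k elements.
			}
		}
	}
}

func main() {
	groups := make([]group, 2)
	limitK(1, []int{0, 0, 1, 1}, []float64{10, 20, 30, 40}, groups)
	fmt.Println(groups[0].samples, groups[1].samples) // [10] [30]: k per group, the rest is skipped.
}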
|
||||
|
@ -3071,6 +3126,11 @@ func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Ma
|
|||
for _, v := range aggr.heap {
|
||||
add(v.Metric, v.F)
|
||||
}
|
||||
|
||||
case parser.LIMITK, parser.LIMIT_RATIO:
|
||||
for _, v := range aggr.heap {
|
||||
add(v.Metric, v.F)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3425,6 +3485,56 @@ func makeInt64Pointer(val int64) *int64 {
|
|||
return valp
|
||||
}
|
||||
|
||||
// Add RatioSampler interface to allow unit-testing (previously: Randomizer).
|
||||
type RatioSampler interface {
|
||||
// Return this sample "offset" between [0.0, 1.0]
|
||||
sampleOffset(ts int64, sample *Sample) float64
|
||||
AddRatioSample(r float64, sample *Sample) bool
|
||||
}
|
||||
|
||||
// Use Hash(labels.String()) / maxUint64 as a "deterministic"
|
||||
// value in [0.0, 1.0].
|
||||
type HashRatioSampler struct{}
|
||||
|
||||
var ratiosampler RatioSampler = NewHashRatioSampler()
|
||||
|
||||
func NewHashRatioSampler() *HashRatioSampler {
|
||||
return &HashRatioSampler{}
|
||||
}
|
||||
|
||||
func (s *HashRatioSampler) sampleOffset(ts int64, sample *Sample) float64 {
|
||||
const (
|
||||
float64MaxUint64 = float64(math.MaxUint64)
|
||||
)
|
||||
return float64(sample.Metric.Hash()) / float64MaxUint64
|
||||
}
|
||||
|
||||
func (s *HashRatioSampler) AddRatioSample(ratioLimit float64, sample *Sample) bool {
|
||||
// If ratioLimit >= 0: add sample if sampleOffset is lesser than ratioLimit
|
||||
//
|
||||
// 0.0 ratioLimit 1.0
|
||||
// [---------|--------------------------]
|
||||
// [#########...........................]
|
||||
//
|
||||
// e.g.:
|
||||
// sampleOffset==0.3 && ratioLimit==0.4
|
||||
// 0.3 < 0.4 ? --> add sample
|
||||
//
|
||||
// Else if ratioLimit < 0: add sample if sampleOffset falls in the "complement" of the ratioLimit>=0 case
|
||||
// (loosely similar behavior to negative array index in other programming languages)
|
||||
//
|
||||
// 0.0 1+ratioLimit 1.0
|
||||
// [---------|--------------------------]
|
||||
// [.........###########################]
|
||||
//
|
||||
// e.g.:
|
||||
// sampleOffset==0.3 && ratioLimit==-0.6
|
||||
// 0.3 >= 0.4 ? --> don't add sample
|
||||
sampleOffset := s.sampleOffset(sample.T, sample)
|
||||
return (ratioLimit >= 0 && sampleOffset < ratioLimit) ||
|
||||
(ratioLimit < 0 && sampleOffset >= (1.0+ratioLimit))
|
||||
}
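AddRatioSample is deterministic: each series maps to a fixed offset in [0.0, 1.0] derived from its label hash, a positive ratio keeps offsets below r, and a negative ratio keeps the complementary band, which is why limit_ratio(r, x) and limit_ratio(-(1.0-r), x) partition the input. A standalone sketch of that rule, with an FNV hash over a label string standing in for labels.Labels.Hash():

package main

import (
	"fmt"
	"hash/fnv"
	"math"
)

// sampleOffset maps a series (represented here by its label string) to a
// deterministic value in [0.0, 1.0]; the real sampler uses labels.Labels.Hash().
func sampleOffset(series string) float64 {
	h := fnv.New64a()
	h.Write([]byte(series))
	return float64(h.Sum64()) / float64(math.MaxUint64)
}

// addRatioSample mirrors the selection rule above: keep offsets below r for
// positive ratios, and the complementary band [1+r, 1.0] for negative ones.
func addRatioSample(r float64, series string) bool {
	off := sampleOffset(series)
	return (r >= 0 && off < r) || (r < 0 && off >= 1.0+r)
}

func main() {
	s := `http_requests{instance="0"}`
	// The two calls below always disagree for any series, which is why
	// limit_ratio(0.2, x) or limit_ratio(-0.8, x) covers every input series.
	fmt.Println(addRatioSample(0.2, s), addRatioSample(-0.8, s))
}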
|
||||
|
||||
type histogramStatsSeries struct {
|
||||
storage.Series
|
||||
}
|
||||
|
|
|
@ -49,6 +49,8 @@ const (
|
|||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// Enable experimental functions testing
|
||||
parser.EnableExperimentalFunctions = true
|
||||
goleak.VerifyTestMain(m)
|
||||
}
|
||||
|
||||
|
|
|
@ -993,10 +993,14 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
|
|||
|
||||
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
|
||||
t := sum + inc
|
||||
switch {
|
||||
case math.IsInf(t, 0):
|
||||
c = 0
|
||||
|
||||
// Using Neumaier improvement, swap if next term larger than sum.
|
||||
if math.Abs(sum) >= math.Abs(inc) {
|
||||
case math.Abs(sum) >= math.Abs(inc):
|
||||
c += (sum - t) + inc
|
||||
} else {
|
||||
default:
|
||||
c += (inc - t) + sum
|
||||
}
|
||||
return t, c
|
||||
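The rewritten kahanSumInc above is the Neumaier variant: the compensation is accumulated from whichever operand is smaller in magnitude, and it is zeroed once the running sum reaches +/-Inf so Inf and NaN propagate as the new tests below expect. A self-contained sketch with a local copy of the function (the promql one is unexported):

package main

import (
	"fmt"
	"math"
)

// kahanSumInc adds inc to sum with a Neumaier-style compensation term c,
// dropping the compensation when the sum overflows to +/-Inf.
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func main() {
	var sum, c float64
	for _, v := range []float64{1.0, 1e100, 1.0, -1e100} {
		sum, c = kahanSumInc(v, sum, c)
	}
	fmt.Println(sum + c) // 2, where a plain running sum would print 0.
}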
|
|
promql/functions_internal_test.go (new file, 81 lines)
|
@ -0,0 +1,81 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestKahanSumInc(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
first float64
|
||||
second float64
|
||||
expected float64
|
||||
}{
|
||||
"+Inf + anything = +Inf": {
|
||||
first: math.Inf(1),
|
||||
second: 2.0,
|
||||
expected: math.Inf(1),
|
||||
},
|
||||
"-Inf + anything = -Inf": {
|
||||
first: math.Inf(-1),
|
||||
second: 2.0,
|
||||
expected: math.Inf(-1),
|
||||
},
|
||||
"+Inf + -Inf = NaN": {
|
||||
first: math.Inf(1),
|
||||
second: math.Inf(-1),
|
||||
expected: math.NaN(),
|
||||
},
|
||||
"NaN + anything = NaN": {
|
||||
first: math.NaN(),
|
||||
second: 2,
|
||||
expected: math.NaN(),
|
||||
},
|
||||
"NaN + Inf = NaN": {
|
||||
first: math.NaN(),
|
||||
second: math.Inf(1),
|
||||
expected: math.NaN(),
|
||||
},
|
||||
"NaN + -Inf = NaN": {
|
||||
first: math.NaN(),
|
||||
second: math.Inf(-1),
|
||||
expected: math.NaN(),
|
||||
},
|
||||
}
|
||||
|
||||
runTest := func(t *testing.T, a, b, expected float64) {
|
||||
t.Run(fmt.Sprintf("%v + %v = %v", a, b, expected), func(t *testing.T) {
|
||||
sum, c := kahanSumInc(b, a, 0)
|
||||
result := sum + c
|
||||
|
||||
if math.IsNaN(expected) {
|
||||
require.Truef(t, math.IsNaN(result), "expected result to be NaN, but got %v (from %v + %v)", result, sum, c)
|
||||
} else {
|
||||
require.Equalf(t, expected, result, "expected result to be %v, but got %v (from %v + %v)", expected, result, sum, c)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
for name, testCase := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
runTest(t, testCase.first, testCase.second, testCase.expected)
|
||||
runTest(t, testCase.second, testCase.first, testCase.expected)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -126,6 +126,8 @@ STDDEV
|
|||
STDVAR
|
||||
SUM
|
||||
TOPK
|
||||
LIMITK
|
||||
LIMIT_RATIO
|
||||
%token aggregatorsEnd
|
||||
|
||||
// Keywords.
|
||||
|
@ -609,7 +611,7 @@ metric : metric_identifier label_set
|
|||
;
|
||||
|
||||
|
||||
metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END;
|
||||
metric_identifier: AVG | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | IDENTIFIER | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | QUANTILE | STDDEV | STDVAR | SUM | TOPK | WITHOUT | START | END | LIMITK | LIMIT_RATIO;
|
||||
|
||||
label_set : LEFT_BRACE label_set_list RIGHT_BRACE
|
||||
{ $$ = labels.New($2...) }
|
||||
|
@ -851,10 +853,10 @@ bucket_set_list : bucket_set_list SPACE number
|
|||
* Keyword lists.
|
||||
*/
|
||||
|
||||
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK ;
|
||||
aggregate_op : AVG | BOTTOMK | COUNT | COUNT_VALUES | GROUP | MAX | MIN | QUANTILE | STDDEV | STDVAR | SUM | TOPK | LIMITK | LIMIT_RATIO;
|
||||
|
||||
// Inside of grouping options label names can be recognized as keywords by the lexer. This is a list of keywords that could also be a label name.
|
||||
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2;
|
||||
maybe_label : AVG | BOOL | BOTTOMK | BY | COUNT | COUNT_VALUES | GROUP | GROUP_LEFT | GROUP_RIGHT | IDENTIFIER | IGNORING | LAND | LOR | LUNLESS | MAX | METRIC_IDENTIFIER | MIN | OFFSET | ON | QUANTILE | STDDEV | STDVAR | SUM | TOPK | START | END | ATAN2 | LIMITK | LIMIT_RATIO;
|
||||
|
||||
unary_op : ADD | SUB;
|
||||
|
||||
|
|
File diff suppressed because it is too large
|
@ -65,7 +65,7 @@ func (i ItemType) IsAggregator() bool { return i > aggregatorsStart && i < aggre
|
|||
// IsAggregatorWithParam returns true if the Item is an aggregator that takes a parameter.
|
||||
// Returns false otherwise.
|
||||
func (i ItemType) IsAggregatorWithParam() bool {
|
||||
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE
|
||||
return i == TOPK || i == BOTTOMK || i == COUNT_VALUES || i == QUANTILE || i == LIMITK || i == LIMIT_RATIO
|
||||
}
|
||||
|
||||
// IsKeyword returns true if the Item corresponds to a keyword.
|
||||
|
@ -118,6 +118,8 @@ var key = map[string]ItemType{
|
|||
"bottomk": BOTTOMK,
|
||||
"count_values": COUNT_VALUES,
|
||||
"quantile": QUANTILE,
|
||||
"limitk": LIMITK,
|
||||
"limit_ratio": LIMIT_RATIO,
|
||||
|
||||
// Keywords.
|
||||
"offset": OFFSET,
|
||||
|
|
|
@ -447,6 +447,10 @@ func (p *parser) newAggregateExpr(op Item, modifier, args Node) (ret *AggregateE
|
|||
|
||||
desiredArgs := 1
|
||||
if ret.Op.IsAggregatorWithParam() {
|
||||
if !EnableExperimentalFunctions && (ret.Op == LIMITK || ret.Op == LIMIT_RATIO) {
|
||||
p.addParseErrf(ret.PositionRange(), "limitk() and limit_ratio() are experimental and must be enabled with --enable-feature=promql-experimental-functions")
|
||||
return
|
||||
}
|
||||
desiredArgs = 2
|
||||
|
||||
ret.Param = arguments[0]
|
||||
|
@ -672,7 +676,7 @@ func (p *parser) checkAST(node Node) (typ ValueType) {
|
|||
p.addParseErrf(n.PositionRange(), "aggregation operator expected in aggregation expression but got %q", n.Op)
|
||||
}
|
||||
p.expectType(n.Expr, ValueTypeVector, "aggregation expression")
|
||||
if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE {
|
||||
if n.Op == TOPK || n.Op == BOTTOMK || n.Op == QUANTILE || n.Op == LIMITK || n.Op == LIMIT_RATIO {
|
||||
p.expectType(n.Param, ValueTypeScalar, "aggregation parameter")
|
||||
}
|
||||
if n.Op == COUNT_VALUES {
|
||||
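Because limitk and limit_ratio stay behind the experimental-functions gate, callers have to flip parser.EnableExperimentalFunctions before parsing them, exactly as the tests in this change do. A minimal sketch (the query string is just an example):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Without this, newAggregateExpr rejects limitk()/limit_ratio() with the
	// "must be enabled with --enable-feature=promql-experimental-functions" error.
	parser.EnableExperimentalFunctions = true

	expr, err := parser.ParseExpr(`limitk(5, http_requests_total)`)
	if err != nil {
		panic(err)
	}
	fmt.Println(expr.String())
}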
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/promql/promqltest"
|
||||
"github.com/prometheus/prometheus/util/teststorage"
|
||||
)
|
||||
|
@ -45,6 +46,8 @@ func TestConcurrentRangeQueries(t *testing.T) {
|
|||
MaxSamples: 50000000,
|
||||
Timeout: 100 * time.Second,
|
||||
}
|
||||
// Enable experimental functions testing
|
||||
parser.EnableExperimentalFunctions = true
|
||||
engine := promql.NewEngine(opts)
|
||||
|
||||
const interval = 10000 // 10s interval.
|
||||
|
|
promql/promqltest/testdata/aggregators.test (vendored, 29 lines changed)
|
@ -511,10 +511,39 @@ load 10s
|
|||
data{test="ten",point="b"} 8
|
||||
data{test="ten",point="c"} 1e+100
|
||||
data{test="ten",point="d"} -1e100
|
||||
data{test="pos_inf",group="1",point="a"} Inf
|
||||
data{test="pos_inf",group="1",point="b"} 2
|
||||
data{test="pos_inf",group="2",point="a"} 2
|
||||
data{test="pos_inf",group="2",point="b"} Inf
|
||||
data{test="neg_inf",group="1",point="a"} -Inf
|
||||
data{test="neg_inf",group="1",point="b"} 2
|
||||
data{test="neg_inf",group="2",point="a"} 2
|
||||
data{test="neg_inf",group="2",point="b"} -Inf
|
||||
data{test="inf_inf",point="a"} Inf
|
||||
data{test="inf_inf",point="b"} -Inf
|
||||
data{test="nan",group="1",point="a"} NaN
|
||||
data{test="nan",group="1",point="b"} 2
|
||||
data{test="nan",group="2",point="a"} 2
|
||||
data{test="nan",group="2",point="b"} NaN
|
||||
|
||||
eval instant at 1m sum(data{test="ten"})
|
||||
{} 10
|
||||
|
||||
eval instant at 1m sum by (group) (data{test="pos_inf"})
|
||||
{group="1"} Inf
|
||||
{group="2"} Inf
|
||||
|
||||
eval instant at 1m sum by (group) (data{test="neg_inf"})
|
||||
{group="1"} -Inf
|
||||
{group="2"} -Inf
|
||||
|
||||
eval instant at 1m sum(data{test="inf_inf"})
|
||||
{} NaN
|
||||
|
||||
eval instant at 1m sum by (group) (data{test="nan"})
|
||||
{group="1"} NaN
|
||||
{group="2"} NaN
|
||||
|
||||
clear
|
||||
|
||||
# Test that aggregations are deterministic.
|
||||
|
|
promql/promqltest/testdata/limit.test (new vendored file, 119 lines)
|
@ -0,0 +1,119 @@
|
|||
# Tests for limitk
|
||||
#
|
||||
# NB: those many `and http_requests` are to ensure that the series _are_ indeed
|
||||
# a subset of the original series.
|
||||
load 5m
|
||||
http_requests{job="api-server", instance="0", group="production"} 0+10x10
|
||||
http_requests{job="api-server", instance="1", group="production"} 0+20x10
|
||||
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
|
||||
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
|
||||
http_requests{job="api-server", instance="2", group="canary"} 0+50x10
|
||||
http_requests{job="api-server", instance="3", group="canary"} 0+60x10
|
||||
|
||||
eval instant at 50m count(limitk by (group) (0, http_requests))
|
||||
# empty
|
||||
|
||||
eval instant at 50m count(limitk by (group) (-1, http_requests))
|
||||
# empty
|
||||
|
||||
# Exercise k==1 special case (as sample is added before the main series loop)
|
||||
eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests)
|
||||
{} 2
|
||||
|
||||
eval instant at 50m count(limitk by (group) (2, http_requests) and http_requests)
|
||||
{} 4
|
||||
|
||||
eval instant at 50m count(limitk(100, http_requests) and http_requests)
|
||||
{} 6
|
||||
|
||||
# Exercise k==1 special case (as sample is added before the main series loop)
|
||||
eval instant at 50m count(limitk by (group) (1, http_requests) and http_requests)
|
||||
{} 2
|
||||
|
||||
eval instant at 50m count(limitk by (group) (2, http_requests) and http_requests)
|
||||
{} 4
|
||||
|
||||
eval instant at 50m count(limitk(100, http_requests) and http_requests)
|
||||
{} 6
|
||||
|
||||
# limit_ratio
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.0, http_requests))
|
||||
# empty
|
||||
|
||||
# limitk(2, ...) should always return a 2-count subset of the timeseries (hence the AND'ing)
|
||||
eval range from 0 to 50m step 5m count(limitk(2, http_requests) and http_requests)
|
||||
{} 2+0x10
|
||||
|
||||
# Tests for limit_ratio
|
||||
#
|
||||
# NB: below 0.5 ratio will depend on some hashing "luck" (also there's no guarantee that
|
||||
# an integer comes from: total number of series * ratio), as it depends on:
|
||||
#
|
||||
# * ratioLimit = [0.0, 1.0]:
|
||||
# float64(sample.Metric.Hash()) / float64MaxUint64 < Ratio ?
|
||||
# * ratioLimit = [-1.0, 1.0):
|
||||
# float64(sample.Metric.Hash()) / float64MaxUint64 >= (1.0 + Ratio) ?
|
||||
#
|
||||
# See `AddRatioSample()` in promql/engine.go for more details.
|
||||
|
||||
# Half~ish samples: verify we get "near" 3 (of 0.5 * 6)
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) <= bool (3+1)
|
||||
{} 1+0x10
|
||||
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and http_requests) >= bool (3-1)
|
||||
{} 1+0x10
|
||||
|
||||
# All samples
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(1.0, http_requests) and http_requests)
|
||||
{} 6+0x10
|
||||
|
||||
# All samples
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(-1.0, http_requests) and http_requests)
|
||||
{} 6+0x10
|
||||
|
||||
# Capped to 1.0 -> all samples
|
||||
eval_warn range from 0 to 50m step 5m count(limit_ratio(1.1, http_requests) and http_requests)
|
||||
{} 6+0x10
|
||||
|
||||
# Capped to -1.0 -> all samples
|
||||
eval_warn range from 0 to 50m step 5m count(limit_ratio(-1.1, http_requests) and http_requests)
|
||||
{} 6+0x10
|
||||
|
||||
# Verify that limit_ratio(value) and limit_ratio(1.0-value) return the "complement" of each other
|
||||
# Complement below for [0.2, -0.8]
|
||||
#
|
||||
# Complement 1of2: `or` should return all samples
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.2, http_requests) or limit_ratio(-0.8, http_requests))
|
||||
{} 6+0x10
|
||||
|
||||
# Complement 2of2: `and` should return no samples
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.2, http_requests) and limit_ratio(-0.8, http_requests))
|
||||
# empty
|
||||
|
||||
# Complement below for [0.5, -0.5]
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) or limit_ratio(-0.5, http_requests))
|
||||
{} 6+0x10
|
||||
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.5, http_requests) and limit_ratio(-0.5, http_requests))
|
||||
# empty
|
||||
|
||||
# Complement below for [0.8, -0.2]
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) or limit_ratio(-0.2, http_requests))
|
||||
{} 6+0x10
|
||||
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(0.8, http_requests) and limit_ratio(-0.2, http_requests))
|
||||
# empty
|
||||
|
||||
# Complement below for [some_ratio, 1.0 - some_ratio], some_ratio derived from time(),
|
||||
# using a small prime number to avoid rounded ratio values, and a small set of them.
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) or limit_ratio(1.0 - (time() % 17/17), http_requests))
|
||||
{} 6+0x10
|
||||
|
||||
eval range from 0 to 50m step 5m count(limit_ratio(time() % 17/17, http_requests) and limit_ratio(1.0 - (time() % 17/17), http_requests))
|
||||
# empty
|
||||
|
||||
# Poor man's normality check: ok (loaded samples follow a nice linearity over labels and time)
|
||||
# The check giving: 1 (i.e. true)
|
||||
eval range from 0 to 50m step 5m abs(avg(limit_ratio(0.5, http_requests)) - avg(limit_ratio(-0.5, http_requests))) <= bool stddev(http_requests)
|
||||
{} 1+0x10
|
||||
|
|
@ -375,10 +375,10 @@ load 10m
|
|||
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
|
||||
|
||||
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
|
||||
{} NaN
|
||||
{} Inf
|
||||
|
||||
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
|
||||
{} NaN
|
||||
{} Inf
|
||||
|
||||
clear
|
||||
|
||||
|
|
|
@ -73,9 +73,11 @@ type Options struct {
|
|||
// Option used by downstream scraper users like OpenTelemetry Collector
|
||||
// to help lookup metric metadata. Should be false for Prometheus.
|
||||
PassMetadataInContext bool
|
||||
// Option to enable the experimental in-memory metadata storage and append
|
||||
// metadata to the WAL.
|
||||
EnableMetadataStorage bool
|
||||
// Option to enable appending of scraped Metadata to the TSDB/other appenders. Individual appenders
|
||||
// can decide what to do with metadata, but for practical purposes this flag exists so that metadata
|
||||
// can be written to the WAL and thus read for remote write.
|
||||
// TODO: implement some form of metadata storage
|
||||
AppendMetadata bool
|
||||
// Option to increase the interval used by scrape manager to throttle target groups updates.
|
||||
DiscoveryReloadInterval model.Duration
|
||||
// Option to enable the ingestion of the created timestamp as a synthetic zero sample.
|
||||
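For downstream users of the scrape package, the rename means setting AppendMetadata instead of EnableMetadataStorage. A hedged sketch of the option struct only (the manager and appendable wiring are elided; the field values are illustrative):

package main

import "github.com/prometheus/prometheus/scrape"

func main() {
	// Sketch only: the option a caller sets so scraped metadata is handed to
	// appenders (and thus the WAL) for remote write 2.0; other fields elided.
	opts := &scrape.Options{
		AppendMetadata:        true,  // Renamed from EnableMetadataStorage in this change.
		PassMetadataInContext: false, // Stays false for Prometheus itself.
	}
	_ = opts
}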
|
|
|
@ -34,6 +34,7 @@ type scrapeMetrics struct {
|
|||
targetScrapePoolExceededTargetLimit prometheus.Counter
|
||||
targetScrapePoolTargetLimit *prometheus.GaugeVec
|
||||
targetScrapePoolTargetsAdded *prometheus.GaugeVec
|
||||
targetScrapePoolSymbolTableItems *prometheus.GaugeVec
|
||||
targetSyncIntervalLength *prometheus.SummaryVec
|
||||
targetSyncFailed *prometheus.CounterVec
|
||||
|
||||
|
@ -129,6 +130,13 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
|
|||
},
|
||||
[]string{"scrape_job"},
|
||||
)
|
||||
sm.targetScrapePoolSymbolTableItems = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "prometheus_target_scrape_pool_symboltable_items",
|
||||
Help: "Current number of symbols in table for this scrape pool.",
|
||||
},
|
||||
[]string{"scrape_job"},
|
||||
)
|
||||
sm.targetScrapePoolSyncsCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "prometheus_target_scrape_pool_sync_total",
|
||||
|
@ -234,6 +242,7 @@ func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
|
|||
sm.targetScrapePoolExceededTargetLimit,
|
||||
sm.targetScrapePoolTargetLimit,
|
||||
sm.targetScrapePoolTargetsAdded,
|
||||
sm.targetScrapePoolSymbolTableItems,
|
||||
sm.targetSyncFailed,
|
||||
// Used by targetScraper.
|
||||
sm.targetScrapeExceededBodySizeLimit,
|
||||
|
@ -274,6 +283,7 @@ func (sm *scrapeMetrics) Unregister() {
|
|||
sm.reg.Unregister(sm.targetScrapePoolExceededTargetLimit)
|
||||
sm.reg.Unregister(sm.targetScrapePoolTargetLimit)
|
||||
sm.reg.Unregister(sm.targetScrapePoolTargetsAdded)
|
||||
sm.reg.Unregister(sm.targetScrapePoolSymbolTableItems)
|
||||
sm.reg.Unregister(sm.targetSyncFailed)
|
||||
sm.reg.Unregister(sm.targetScrapeExceededBodySizeLimit)
|
||||
sm.reg.Unregister(sm.targetScrapeCacheFlushForced)
|
||||
|
|
|
@ -181,7 +181,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
|
|||
options.EnableNativeHistogramsIngestion,
|
||||
options.EnableCreatedTimestampZeroIngestion,
|
||||
options.ExtraMetrics,
|
||||
options.EnableMetadataStorage,
|
||||
options.AppendMetadata,
|
||||
opts.target,
|
||||
options.PassMetadataInContext,
|
||||
metrics,
|
||||
|
@ -246,6 +246,7 @@ func (sp *scrapePool) stop() {
|
|||
sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName)
|
||||
sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName)
|
||||
sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName)
|
||||
sp.metrics.targetScrapePoolSymbolTableItems.DeleteLabelValues(sp.config.JobName)
|
||||
sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName)
|
||||
sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName)
|
||||
}
|
||||
|
@ -273,6 +274,15 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
|
||||
sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit))
|
||||
|
||||
sp.restartLoops(reuseCache)
|
||||
oldClient.CloseIdleConnections()
|
||||
sp.metrics.targetReloadIntervalLength.WithLabelValues(time.Duration(sp.config.ScrapeInterval).String()).Observe(
|
||||
time.Since(start).Seconds(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sp *scrapePool) restartLoops(reuseCache bool) {
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
interval = time.Duration(sp.config.ScrapeInterval)
|
||||
|
@ -313,7 +323,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
client: sp.client,
|
||||
timeout: timeout,
|
||||
bodySizeLimit: bodySizeLimit,
|
||||
acceptHeader: acceptHeader(cfg.ScrapeProtocols),
|
||||
acceptHeader: acceptHeader(sp.config.ScrapeProtocols),
|
||||
acceptEncodingHeader: acceptEncodingHeader(enableCompression),
|
||||
}
|
||||
newLoop = sp.newLoop(scrapeLoopOptions{
|
||||
|
@ -352,11 +362,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
sp.targetMtx.Unlock()
|
||||
|
||||
wg.Wait()
|
||||
oldClient.CloseIdleConnections()
|
||||
sp.metrics.targetReloadIntervalLength.WithLabelValues(interval.String()).Observe(
|
||||
time.Since(start).Seconds(),
|
||||
)
|
||||
}
|
||||
|
||||
// Must be called with sp.mtx held.
|
||||
func (sp *scrapePool) checkSymbolTable() {
|
||||
// Here we take steps to clear out the symbol table if it has grown a lot.
|
||||
// After waiting some time for things to settle, we take the size of the symbol-table.
|
||||
// If, after some more time, the table has grown to twice that size, we start a new one.
|
||||
|
@ -367,11 +376,10 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error {
|
|||
} else if sp.symbolTable.Len() > 2*sp.initialSymbolTableLen {
|
||||
sp.symbolTable = labels.NewSymbolTable()
|
||||
sp.initialSymbolTableLen = 0
|
||||
sp.restartLoops(false) // To drop all caches.
|
||||
}
|
||||
sp.lastSymbolTableCheck = time.Now()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync converts target groups into actual scrape targets and synchronizes
|
||||
|
@ -408,8 +416,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
|
|||
}
|
||||
}
|
||||
}
|
||||
sp.metrics.targetScrapePoolSymbolTableItems.WithLabelValues(sp.config.JobName).Set(float64(sp.symbolTable.Len()))
|
||||
sp.targetMtx.Unlock()
|
||||
sp.sync(all)
|
||||
sp.checkSymbolTable()
|
||||
|
||||
sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe(
|
||||
time.Since(start).Seconds(),
|
||||
|
|
|
@ -10,8 +10,9 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then
|
|||
exit 255
|
||||
fi
|
||||
|
||||
# TODO(bwplotka): Move to buf, this is not OSS agnostic, likely won't work locally.
|
||||
if ! [[ $(protoc --version) =~ "3.15.8" ]]; then
|
||||
echo "could not find protoc 3.15.8, is it installed + in PATH?"
|
||||
echo "could not find protoc 3.15.8, is it installed + in PATH? Consider commenting out this check for local flow"
|
||||
exit 255
|
||||
fi
|
||||
|
||||
|
@ -40,6 +41,9 @@ for dir in ${DIRS}; do
|
|||
-I="${PROM_PATH}" \
|
||||
-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
|
||||
./*.proto
|
||||
protoc --gogofast_out=plugins=grpc:. -I=. \
|
||||
-I="${GOGOPROTO_PATH}" \
|
||||
./io/prometheus/write/v2/*.proto
|
||||
protoc --gogofast_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,paths=source_relative:. -I=. \
|
||||
-I="${GOGOPROTO_PATH}" \
|
||||
./io/prometheus/client/*.proto
|
||||
|
|
|
@ -181,7 +181,8 @@ func TestFanoutErrors(t *testing.T) {
|
|||
require.NotEmpty(t, ss.Warnings(), "warnings expected")
|
||||
w := ss.Warnings()
|
||||
require.Error(t, w.AsErrors()[0])
|
||||
require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
|
||||
warn, _ := w.AsStrings("", 0, 0)
|
||||
require.Equal(t, tc.warning.Error(), warn[0])
|
||||
}
|
||||
})
|
||||
t.Run("chunks", func(t *testing.T) {
|
||||
|
@ -207,7 +208,8 @@ func TestFanoutErrors(t *testing.T) {
|
|||
require.NotEmpty(t, ss.Warnings(), "warnings expected")
|
||||
w := ss.Warnings()
|
||||
require.Error(t, w.AsErrors()[0])
|
||||
require.Equal(t, tc.warning.Error(), w.AsStrings("", 0)[0])
|
||||
warn, _ := w.AsStrings("", 0, 0)
|
||||
require.Equal(t, tc.warning.Error(), warn[0])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -35,13 +35,40 @@ import (
|
|||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
"github.com/prometheus/prometheus/storage/remote/azuread"
|
||||
)
|
||||
|
||||
const maxErrMsgLen = 1024
|
||||
|
||||
var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
|
||||
const (
|
||||
RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version"
|
||||
RemoteWriteVersion1HeaderValue = "0.1.0"
|
||||
RemoteWriteVersion20HeaderValue = "2.0.0"
|
||||
appProtoContentType = "application/x-protobuf"
|
||||
)
|
||||
|
||||
// Compression represents the encoding. Currently remote storage supports only
|
||||
// one, but we experiment with more, thus leaving the compression scaffolding
|
||||
// for now.
|
||||
// NOTE(bwplotka): Keeping it public, as a non-stable help for importers to use.
|
||||
type Compression string
|
||||
|
||||
const (
|
||||
// SnappyBlockCompression represents https://github.com/google/snappy/blob/2c94e11145f0b7b184b831577c93e5a41c4c0346/format_description.txt
|
||||
SnappyBlockCompression Compression = "snappy"
|
||||
)
|
||||
|
||||
var (
|
||||
// UserAgent represents Prometheus version to use for user agent header.
|
||||
UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
|
||||
|
||||
remoteWriteContentTypeHeaders = map[config.RemoteWriteProtoMsg]string{
|
||||
config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec.
|
||||
config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request",
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
remoteReadQueriesTotal = prometheus.NewCounterVec(
|
||||
|
@ -93,6 +120,9 @@ type Client struct {
|
|||
readQueries prometheus.Gauge
|
||||
readQueriesTotal *prometheus.CounterVec
|
||||
readQueriesDuration prometheus.Observer
|
||||
|
||||
writeProtoMsg config.RemoteWriteProtoMsg
|
||||
writeCompression Compression // Not exposed by ClientConfig for now.
|
||||
}
|
||||
|
||||
// ClientConfig configures a client.
|
||||
|
@ -104,6 +134,7 @@ type ClientConfig struct {
|
|||
AzureADConfig *azuread.AzureADConfig
|
||||
Headers map[string]string
|
||||
RetryOnRateLimit bool
|
||||
WriteProtoMsg config.RemoteWriteProtoMsg
|
||||
}
|
||||
|
||||
// ReadClient uses the SAMPLES method of remote read to read series samples from remote server.
|
||||
|
@ -162,14 +193,20 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) {
|
|||
}
|
||||
}
|
||||
|
||||
httpClient.Transport = otelhttp.NewTransport(t)
|
||||
writeProtoMsg := config.RemoteWriteProtoMsgV1
|
||||
if conf.WriteProtoMsg != "" {
|
||||
writeProtoMsg = conf.WriteProtoMsg
|
||||
}
|
||||
|
||||
httpClient.Transport = otelhttp.NewTransport(t)
|
||||
return &Client{
|
||||
remoteName: name,
|
||||
urlString: conf.URL.String(),
|
||||
Client: httpClient,
|
||||
retryOnRateLimit: conf.RetryOnRateLimit,
|
||||
timeout: time.Duration(conf.Timeout),
|
||||
writeProtoMsg: writeProtoMsg,
|
||||
writeCompression: SnappyBlockCompression,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -206,10 +243,16 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
|
|||
return err
|
||||
}
|
||||
|
||||
httpReq.Header.Add("Content-Encoding", "snappy")
|
||||
httpReq.Header.Set("Content-Type", "application/x-protobuf")
|
||||
httpReq.Header.Add("Content-Encoding", string(c.writeCompression))
|
||||
httpReq.Header.Set("Content-Type", remoteWriteContentTypeHeaders[c.writeProtoMsg])
|
||||
httpReq.Header.Set("User-Agent", UserAgent)
|
||||
httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0")
|
||||
if c.writeProtoMsg == config.RemoteWriteProtoMsgV1 {
|
||||
// Compatibility mode for 1.0.
|
||||
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion1HeaderValue)
|
||||
} else {
|
||||
httpReq.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||
}
|
||||
|
||||
if attempt > 0 {
|
||||
httpReq.Header.Set("Retry-Attempt", strconv.Itoa(attempt))
|
||||
}
|
||||
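The Store path now derives Content-Type and the remote-write version header from the configured proto message instead of hard-coding the 1.0 values. A sketch of assembling an equivalent 2.0 request by hand, outside the Client (the endpoint URL, User-Agent string, and payload bytes are placeholders):

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/golang/snappy"
)

func main() {
	// marshaled would be a serialized io.prometheus.write.v2.Request message;
	// a placeholder payload is used here.
	marshaled := []byte("...")
	body := snappy.Encode(nil, marshaled)

	req, err := http.NewRequest(http.MethodPost, "http://localhost:9090/api/v1/write", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	// Headers for a remote write 2.0 request, mirroring the constants above.
	req.Header.Set("Content-Encoding", "snappy")
	req.Header.Set("Content-Type", "application/x-protobuf;proto=io.prometheus.write.v2.Request")
	req.Header.Set("X-Prometheus-Remote-Write-Version", "2.0.0")
	req.Header.Set("User-Agent", "Prometheus/2.x")

	fmt.Println(req.Header)
}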
|
@ -265,12 +308,12 @@ func retryAfterDuration(t string) model.Duration {
|
|||
}
|
||||
|
||||
// Name uniquely identifies the client.
|
||||
func (c Client) Name() string {
|
||||
func (c *Client) Name() string {
|
||||
return c.remoteName
|
||||
}
|
||||
|
||||
// Endpoint is the remote read or write endpoint.
|
||||
func (c Client) Endpoint() string {
|
||||
func (c *Client) Endpoint() string {
|
||||
return c.urlString
|
||||
}
|
||||
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
"net/http"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
@ -30,10 +29,10 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
|
||||
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
|
@ -153,10 +152,10 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
|
|||
})
|
||||
case chunkenc.ValHistogram:
|
||||
ts, h := iter.AtHistogram(nil)
|
||||
histograms = append(histograms, HistogramToHistogramProto(ts, h))
|
||||
histograms = append(histograms, prompb.FromIntHistogram(ts, h))
|
||||
case chunkenc.ValFloatHistogram:
|
||||
ts, fh := iter.AtFloatHistogram(nil)
|
||||
histograms = append(histograms, FloatHistogramToHistogramProto(ts, fh))
|
||||
histograms = append(histograms, prompb.FromFloatHistogram(ts, fh))
|
||||
default:
|
||||
return nil, ss.Warnings(), fmt.Errorf("unrecognized value type: %s", valType)
|
||||
}
|
||||
|
@ -166,7 +165,7 @@ func ToQueryResult(ss storage.SeriesSet, sampleLimit int) (*prompb.QueryResult,
|
|||
}
|
||||
|
||||
resp.Timeseries = append(resp.Timeseries, &prompb.TimeSeries{
|
||||
Labels: LabelsToLabelsProto(series.Labels(), nil),
|
||||
Labels: prompb.FromLabels(series.Labels(), nil),
|
||||
Samples: samples,
|
||||
Histograms: histograms,
|
||||
})
|
||||
|
@ -182,7 +181,7 @@ func FromQueryResult(sortSeries bool, res *prompb.QueryResult) storage.SeriesSet
|
|||
if err := validateLabelsAndMetricName(ts.Labels); err != nil {
|
||||
return errSeriesSet{err: err}
|
||||
}
|
||||
lbls := LabelProtosToLabels(&b, ts.Labels)
|
||||
lbls := ts.ToLabels(&b, nil)
|
||||
series = append(series, &concreteSeries{labels: lbls, floats: ts.Samples, histograms: ts.Histograms})
|
||||
}
|
||||
|
||||
|
@ -235,7 +234,7 @@ func StreamChunkedReadResponses(
|
|||
for ss.Next() {
|
||||
series := ss.At()
|
||||
iter = series.Iterator(iter)
|
||||
lbls = MergeLabels(LabelsToLabelsProto(series.Labels(), lbls), sortedExternalLabels)
|
||||
lbls = MergeLabels(prompb.FromLabels(series.Labels(), lbls), sortedExternalLabels)
|
||||
|
||||
maxDataLength := maxBytesInFrame
|
||||
for _, lbl := range lbls {
|
||||
|
@ -481,21 +480,16 @@ func (c *concreteSeriesIterator) AtHistogram(*histogram.Histogram) (int64, *hist
|
|||
panic("iterator is not on an integer histogram sample")
|
||||
}
|
||||
h := c.series.histograms[c.histogramsCur]
|
||||
return h.Timestamp, HistogramProtoToHistogram(h)
|
||||
return h.Timestamp, h.ToIntHistogram()
|
||||
}
|
||||
|
||||
// AtFloatHistogram implements chunkenc.Iterator.
|
||||
func (c *concreteSeriesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
|
||||
switch c.curValType {
|
||||
case chunkenc.ValHistogram:
|
||||
if c.curValType == chunkenc.ValHistogram || c.curValType == chunkenc.ValFloatHistogram {
|
||||
fh := c.series.histograms[c.histogramsCur]
|
||||
return fh.Timestamp, HistogramProtoToFloatHistogram(fh)
|
||||
case chunkenc.ValFloatHistogram:
|
||||
fh := c.series.histograms[c.histogramsCur]
|
||||
return fh.Timestamp, FloatHistogramProtoToFloatHistogram(fh)
|
||||
default:
|
||||
panic("iterator is not on a histogram sample")
|
||||
return fh.Timestamp, fh.ToFloatHistogram() // integer will be auto-converted.
|
||||
}
|
||||
panic("iterator is not on a histogram sample")
|
||||
}
|
||||
|
||||
// AtT implements chunkenc.Iterator.
|
||||
|
@ -618,141 +612,6 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func exemplarProtoToExemplar(b *labels.ScratchBuilder, ep prompb.Exemplar) exemplar.Exemplar {
|
||||
timestamp := ep.Timestamp
|
||||
|
||||
return exemplar.Exemplar{
|
||||
Labels: LabelProtosToLabels(b, ep.Labels),
|
||||
Value: ep.Value,
|
||||
Ts: timestamp,
|
||||
HasTs: timestamp != 0,
|
||||
}
|
||||
}
|
||||
|
||||
// HistogramProtoToHistogram extracts a (normal integer) Histogram from the
|
||||
// provided proto message. The caller has to make sure that the proto message
|
||||
// represents an integer histogram and not a float histogram, or it panics.
|
||||
func HistogramProtoToHistogram(hp prompb.Histogram) *histogram.Histogram {
|
||||
if hp.IsFloatHistogram() {
|
||||
panic("HistogramProtoToHistogram called with a float histogram")
|
||||
}
|
||||
return &histogram.Histogram{
|
||||
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
|
||||
Schema: hp.Schema,
|
||||
ZeroThreshold: hp.ZeroThreshold,
|
||||
ZeroCount: hp.GetZeroCountInt(),
|
||||
Count: hp.GetCountInt(),
|
||||
Sum: hp.Sum,
|
||||
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
|
||||
PositiveBuckets: hp.GetPositiveDeltas(),
|
||||
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
|
||||
NegativeBuckets: hp.GetNegativeDeltas(),
|
||||
}
|
||||
}
|
||||
|
||||
// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the
|
||||
// provided proto message to a Float Histogram. The caller has to make sure that
|
||||
// the proto message represents a float histogram and not an integer histogram,
|
||||
// or it panics.
|
||||
func FloatHistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
|
||||
if !hp.IsFloatHistogram() {
|
||||
panic("FloatHistogramProtoToFloatHistogram called with an integer histogram")
|
||||
}
|
||||
return &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
|
||||
Schema: hp.Schema,
|
||||
ZeroThreshold: hp.ZeroThreshold,
|
||||
ZeroCount: hp.GetZeroCountFloat(),
|
||||
Count: hp.GetCountFloat(),
|
||||
Sum: hp.Sum,
|
||||
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
|
||||
PositiveBuckets: hp.GetPositiveCounts(),
|
||||
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
|
||||
NegativeBuckets: hp.GetNegativeCounts(),
|
||||
}
|
||||
}
|
||||
|
||||
// HistogramProtoToFloatHistogram extracts and converts a (normal integer) histogram from the provided proto message
|
||||
// to a float histogram. The caller has to make sure that the proto message represents an integer histogram and not a
|
||||
// float histogram, or it panics.
|
||||
func HistogramProtoToFloatHistogram(hp prompb.Histogram) *histogram.FloatHistogram {
|
||||
if hp.IsFloatHistogram() {
|
||||
panic("HistogramProtoToFloatHistogram called with a float histogram")
|
||||
}
|
||||
return &histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.CounterResetHint(hp.ResetHint),
|
||||
Schema: hp.Schema,
|
||||
ZeroThreshold: hp.ZeroThreshold,
|
||||
ZeroCount: float64(hp.GetZeroCountInt()),
|
||||
Count: float64(hp.GetCountInt()),
|
||||
Sum: hp.Sum,
|
||||
PositiveSpans: spansProtoToSpans(hp.GetPositiveSpans()),
|
||||
PositiveBuckets: deltasToCounts(hp.GetPositiveDeltas()),
|
||||
NegativeSpans: spansProtoToSpans(hp.GetNegativeSpans()),
|
||||
NegativeBuckets: deltasToCounts(hp.GetNegativeDeltas()),
|
||||
}
|
||||
}
|
||||
|
||||
func spansProtoToSpans(s []prompb.BucketSpan) []histogram.Span {
|
||||
spans := make([]histogram.Span, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
|
||||
}
|
||||
|
||||
return spans
|
||||
}
|
||||
|
||||
func deltasToCounts(deltas []int64) []float64 {
|
||||
counts := make([]float64, len(deltas))
|
||||
var cur float64
|
||||
for i, d := range deltas {
|
||||
cur += float64(d)
|
||||
counts[i] = cur
|
||||
}
|
||||
return counts
|
||||
}
|
||||
|
||||
func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: h.Count},
|
||||
Sum: h.Sum,
|
||||
Schema: h.Schema,
|
||||
ZeroThreshold: h.ZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount},
|
||||
NegativeSpans: spansToSpansProto(h.NegativeSpans),
|
||||
NegativeDeltas: h.NegativeBuckets,
|
||||
PositiveSpans: spansToSpansProto(h.PositiveSpans),
|
||||
PositiveDeltas: h.PositiveBuckets,
|
||||
ResetHint: prompb.Histogram_ResetHint(h.CounterResetHint),
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountFloat{CountFloat: fh.Count},
|
||||
Sum: fh.Sum,
|
||||
Schema: fh.Schema,
|
||||
ZeroThreshold: fh.ZeroThreshold,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount},
|
||||
NegativeSpans: spansToSpansProto(fh.NegativeSpans),
|
||||
NegativeCounts: fh.NegativeBuckets,
|
||||
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
|
||||
PositiveCounts: fh.PositiveBuckets,
|
||||
ResetHint: prompb.Histogram_ResetHint(fh.CounterResetHint),
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
}
|
||||
|
||||
func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan {
|
||||
spans := make([]prompb.BucketSpan, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
spans[i] = prompb.BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
|
||||
}
|
||||
|
||||
return spans
|
||||
}
|
||||
|
||||
// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric.
|
||||
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
|
||||
metric := make(model.Metric, len(labelPairs))
|
||||
|
@ -762,43 +621,9 @@ func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric {
|
|||
return metric
|
||||
}
|
||||
|
||||
// LabelProtosToLabels transforms prompb labels into labels. The labels builder
|
||||
// will be used to build the returned labels.
|
||||
func LabelProtosToLabels(b *labels.ScratchBuilder, labelPairs []prompb.Label) labels.Labels {
|
||||
b.Reset()
|
||||
for _, l := range labelPairs {
|
||||
b.Add(l.Name, l.Value)
|
||||
}
|
||||
b.Sort()
|
||||
return b.Labels()
|
||||
}
|
||||
|
||||
// LabelsToLabelsProto transforms labels into prompb labels. The buffer slice
|
||||
// will be used to avoid allocations if it is big enough to store the labels.
|
||||
func LabelsToLabelsProto(lbls labels.Labels, buf []prompb.Label) []prompb.Label {
|
||||
result := buf[:0]
|
||||
lbls.Range(func(l labels.Label) {
|
||||
result = append(result, prompb.Label{
|
||||
Name: l.Name,
|
||||
Value: l.Value,
|
||||
})
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
// metricTypeToMetricTypeProto transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
|
||||
func metricTypeToMetricTypeProto(t model.MetricType) prompb.MetricMetadata_MetricType {
|
||||
mt := strings.ToUpper(string(t))
|
||||
v, ok := prompb.MetricMetadata_MetricType_value[mt]
|
||||
if !ok {
|
||||
return prompb.MetricMetadata_UNKNOWN
|
||||
}
|
||||
|
||||
return prompb.MetricMetadata_MetricType(v)
|
||||
}
|
||||
|
||||
// DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling
|
||||
// snappy decompression.
|
||||
// Used also by documentation/examples/remote_storage.
|
||||
func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
|
||||
compressed, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
|
@ -818,6 +643,28 @@ func DecodeWriteRequest(r io.Reader) (*prompb.WriteRequest, error) {
|
|||
return &req, nil
|
||||
}
|
||||
|
||||
// DecodeWriteV2Request from an io.Reader into a writev2.Request, handling
|
||||
// snappy decompression.
|
||||
// Used also by documentation/examples/remote_storage.
|
||||
func DecodeWriteV2Request(r io.Reader) (*writev2.Request, error) {
|
||||
compressed, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reqBuf, err := snappy.Decode(nil, compressed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var req writev2.Request
|
||||
if err := proto.Unmarshal(reqBuf, &req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &req, nil
|
||||
}
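DecodeWriteV2Request hands receivers a symbolized payload: label names and values arrive as index pairs into Symbols rather than inline strings. A sketch of a receiver handler that decodes a v2 request and resolves the references by hand (error handling and real ingestion omitted; the handler path and port are placeholders):

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/prometheus/storage/remote"
)

func handler(w http.ResponseWriter, r *http.Request) {
	req, err := remote.DecodeWriteV2Request(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for _, ts := range req.Timeseries {
		// LabelsRefs is a flat list of (name, value) indexes into req.Symbols.
		for i := 0; i+1 < len(ts.LabelsRefs); i += 2 {
			name := req.Symbols[ts.LabelsRefs[i]]
			value := req.Symbols[ts.LabelsRefs[i+1]]
			fmt.Printf("%s=%q ", name, value)
		}
		fmt.Println(len(ts.Samples), "samples")
	}
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/api/v1/write", handler)
	_ = http.ListenAndServe(":9999", nil) // Blocks; port is an arbitrary example.
}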
|
||||
|
||||
func DecodeOTLPWriteRequest(r *http.Request) (pmetricotlp.ExportRequest, error) {
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
var decoderFunc func(buf []byte) (pmetricotlp.ExportRequest, error)
|
||||
|
|
|
@ -19,13 +19,16 @@ import (
|
|||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
|
@ -57,7 +60,7 @@ var writeRequestFixture = &prompb.WriteRequest{
|
|||
},
|
||||
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
|
||||
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
|
||||
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
|
||||
Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
|
||||
},
|
||||
{
|
||||
Labels: []prompb.Label{
|
||||
|
@ -69,11 +72,59 @@ var writeRequestFixture = &prompb.WriteRequest{
|
|||
},
|
||||
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
|
||||
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
|
||||
Histograms: []prompb.Histogram{HistogramToHistogramProto(2, &testHistogram), FloatHistogramToHistogramProto(3, testHistogram.ToFloat(nil))},
|
||||
Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
writeV2RequestSeries1Metadata = metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "Test gauge for test purposes",
|
||||
Unit: "Maybe op/sec who knows (:",
|
||||
}
|
||||
writeV2RequestSeries2Metadata = metadata.Metadata{
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Test counter for test purposes",
|
||||
}
|
||||
|
||||
// writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation.
|
||||
writeV2RequestFixture = func() *writev2.Request {
|
||||
st := writev2.NewSymbolTable()
|
||||
b := labels.NewScratchBuilder(0)
|
||||
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
|
||||
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
|
||||
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
|
||||
return &writev2.Request{
|
||||
Timeseries: []writev2.TimeSeries{
|
||||
{
|
||||
LabelsRefs: labelRefs,
|
||||
Metadata: writev2.Metadata{
|
||||
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2.
|
||||
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
|
||||
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
|
||||
},
|
||||
Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
|
||||
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}},
|
||||
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
|
||||
},
|
||||
{
|
||||
LabelsRefs: labelRefs,
|
||||
Metadata: writev2.Metadata{
|
||||
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2.
|
||||
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
|
||||
// No unit.
|
||||
},
|
||||
Samples: []writev2.Sample{{Value: 2, Timestamp: 1}},
|
||||
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}},
|
||||
Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
|
||||
},
|
||||
},
|
||||
Symbols: st.Symbols(),
|
||||
}
|
||||
}()
|
||||
)
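// Illustrative sketch, not part of this change: how the v2 symbol table used by
// the fixture above interns strings. Label names/values, help and unit text are
// stored once and referenced by index, which is why both fixture series can share
// the same labelRefs slice. The function name and literals are assumptions.
func exampleSymbolTableUsage() []string {
	st := writev2.NewSymbolTable()
	_ = st.SymbolizeLabels(labels.FromStrings("__name__", "test_metric1", "b", "c"), nil) // label refs
	_ = st.Symbolize("Test gauge for test purposes")                                      // help ref
	return st.Symbols() // Flat []string, shipped once per request as Request.Symbols.
}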
|
||||
|
||||
func TestValidateLabelsAndMetricName(t *testing.T) {
|
||||
tests := []struct {
|
||||
input []prompb.Label
|
||||
|
@ -268,7 +319,7 @@ func TestConcreteSeriesIterator_HistogramSamples(t *testing.T) {
|
|||
} else {
|
||||
ts = int64(i)
|
||||
}
|
||||
histProtos[i] = HistogramToHistogramProto(ts, h)
|
||||
histProtos[i] = prompb.FromIntHistogram(ts, h)
|
||||
}
|
||||
series := &concreteSeries{
|
||||
labels: labels.FromStrings("foo", "bar"),
|
||||
|
@ -319,9 +370,9 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
|
|||
histProtos := make([]prompb.Histogram, len(histograms))
|
||||
for i, h := range histograms {
|
||||
if i < 10 {
|
||||
histProtos[i] = HistogramToHistogramProto(int64(i+1), h)
|
||||
histProtos[i] = prompb.FromIntHistogram(int64(i+1), h)
|
||||
} else {
|
||||
histProtos[i] = HistogramToHistogramProto(int64(i+6), h)
|
||||
histProtos[i] = prompb.FromIntHistogram(int64(i+6), h)
|
||||
}
|
||||
}
|
||||
series := &concreteSeries{
|
||||
|
@ -401,7 +452,7 @@ func TestConcreteSeriesIterator_FloatAndHistogramSamples(t *testing.T) {
|
|||
require.Equal(t, chunkenc.ValHistogram, it.Next())
|
||||
ts, fh = it.AtFloatHistogram(nil)
|
||||
require.Equal(t, int64(17), ts)
|
||||
expected := HistogramProtoToFloatHistogram(HistogramToHistogramProto(int64(17), histograms[11]))
|
||||
expected := prompb.FromIntHistogram(int64(17), histograms[11]).ToFloatHistogram()
|
||||
require.Equal(t, expected, fh)
|
||||
|
||||
// Keep calling Next() until the end.
|
||||
|
@ -485,39 +536,8 @@ func TestMergeLabels(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestMetricTypeToMetricTypeProto(t *testing.T) {
|
||||
tc := []struct {
|
||||
desc string
|
||||
input model.MetricType
|
||||
expected prompb.MetricMetadata_MetricType
|
||||
}{
|
||||
{
|
||||
desc: "with a single-word metric",
|
||||
input: model.MetricTypeCounter,
|
||||
expected: prompb.MetricMetadata_COUNTER,
|
||||
},
|
||||
{
|
||||
desc: "with a two-word metric",
|
||||
input: model.MetricTypeStateset,
|
||||
expected: prompb.MetricMetadata_STATESET,
|
||||
},
|
||||
{
|
||||
desc: "with an unknown metric",
|
||||
input: "not-known",
|
||||
expected: prompb.MetricMetadata_UNKNOWN,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tc {
|
||||
t.Run(tt.desc, func(t *testing.T) {
|
||||
m := metricTypeToMetricTypeProto(tt.input)
|
||||
require.Equal(t, tt.expected, m)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeWriteRequest(t *testing.T) {
|
||||
buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
|
||||
buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := DecodeWriteRequest(bytes.NewReader(buf))
|
||||
|
@ -525,212 +545,18 @@ func TestDecodeWriteRequest(t *testing.T) {
|
|||
require.Equal(t, writeRequestFixture, actual)
|
||||
}
|
||||
|
||||
func TestNilHistogramProto(*testing.T) {
|
||||
// This function will panic if it improperly handles nil
|
||||
// values, causing the test to fail.
|
||||
HistogramProtoToHistogram(prompb.Histogram{})
|
||||
HistogramProtoToFloatHistogram(prompb.Histogram{})
|
||||
}
|
||||
func TestDecodeWriteV2Request(t *testing.T) {
|
||||
buf, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
func exampleHistogram() histogram.Histogram {
|
||||
return histogram.Histogram{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
Count: 19,
|
||||
Sum: 2.7,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 4},
|
||||
{Offset: 0, Length: 0},
|
||||
{Offset: 0, Length: 3},
|
||||
},
|
||||
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 5},
|
||||
{Offset: 1, Length: 0},
|
||||
{Offset: 0, Length: 1},
|
||||
},
|
||||
NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
|
||||
}
|
||||
}
|
||||
|
||||
func exampleHistogramProto() prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountInt{CountInt: 19},
|
||||
Sum: 2.7,
|
||||
Schema: 0,
|
||||
ZeroThreshold: 0,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0},
|
||||
NegativeSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 5,
|
||||
},
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 0,
|
||||
},
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
NegativeDeltas: []int64{1, 2, -2, 1, -1, 0},
|
||||
PositiveSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 4,
|
||||
},
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 0,
|
||||
},
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
},
|
||||
},
|
||||
PositiveDeltas: []int64{1, 2, -2, 1, -1, 0, 0},
|
||||
ResetHint: prompb.Histogram_GAUGE,
|
||||
Timestamp: 1337,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistogramToProtoConvert(t *testing.T) {
|
||||
tests := []struct {
|
||||
input histogram.CounterResetHint
|
||||
expected prompb.Histogram_ResetHint
|
||||
}{
|
||||
{
|
||||
input: histogram.UnknownCounterReset,
|
||||
expected: prompb.Histogram_UNKNOWN,
|
||||
},
|
||||
{
|
||||
input: histogram.CounterReset,
|
||||
expected: prompb.Histogram_YES,
|
||||
},
|
||||
{
|
||||
input: histogram.NotCounterReset,
|
||||
expected: prompb.Histogram_NO,
|
||||
},
|
||||
{
|
||||
input: histogram.GaugeType,
|
||||
expected: prompb.Histogram_GAUGE,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
h := exampleHistogram()
|
||||
h.CounterResetHint = test.input
|
||||
p := exampleHistogramProto()
|
||||
p.ResetHint = test.expected
|
||||
|
||||
require.Equal(t, p, HistogramToHistogramProto(1337, &h))
|
||||
|
||||
require.Equal(t, h, *HistogramProtoToHistogram(p))
|
||||
}
|
||||
}
|
||||
|
||||
func exampleFloatHistogram() histogram.FloatHistogram {
|
||||
return histogram.FloatHistogram{
|
||||
CounterResetHint: histogram.GaugeType,
|
||||
Schema: 0,
|
||||
Count: 19,
|
||||
Sum: 2.7,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 4},
|
||||
{Offset: 0, Length: 0},
|
||||
{Offset: 0, Length: 3},
|
||||
},
|
||||
PositiveBuckets: []float64{1, 2, -2, 1, -1, 0, 0},
|
||||
NegativeSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 5},
|
||||
{Offset: 1, Length: 0},
|
||||
{Offset: 0, Length: 1},
|
||||
},
|
||||
NegativeBuckets: []float64{1, 2, -2, 1, -1, 0},
|
||||
}
|
||||
}
|
||||
|
||||
func exampleFloatHistogramProto() prompb.Histogram {
|
||||
return prompb.Histogram{
|
||||
Count: &prompb.Histogram_CountFloat{CountFloat: 19},
|
||||
Sum: 2.7,
|
||||
Schema: 0,
|
||||
ZeroThreshold: 0,
|
||||
ZeroCount: &prompb.Histogram_ZeroCountFloat{ZeroCountFloat: 0},
|
||||
NegativeSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 5,
|
||||
},
|
||||
{
|
||||
Offset: 1,
|
||||
Length: 0,
|
||||
},
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 1,
|
||||
},
|
||||
},
|
||||
NegativeCounts: []float64{1, 2, -2, 1, -1, 0},
|
||||
PositiveSpans: []prompb.BucketSpan{
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 4,
|
||||
},
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 0,
|
||||
},
|
||||
{
|
||||
Offset: 0,
|
||||
Length: 3,
|
||||
},
|
||||
},
|
||||
PositiveCounts: []float64{1, 2, -2, 1, -1, 0, 0},
|
||||
ResetHint: prompb.Histogram_GAUGE,
|
||||
Timestamp: 1337,
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloatHistogramToProtoConvert(t *testing.T) {
|
||||
tests := []struct {
|
||||
input histogram.CounterResetHint
|
||||
expected prompb.Histogram_ResetHint
|
||||
}{
|
||||
{
|
||||
input: histogram.UnknownCounterReset,
|
||||
expected: prompb.Histogram_UNKNOWN,
|
||||
},
|
||||
{
|
||||
input: histogram.CounterReset,
|
||||
expected: prompb.Histogram_YES,
|
||||
},
|
||||
{
|
||||
input: histogram.NotCounterReset,
|
||||
expected: prompb.Histogram_NO,
|
||||
},
|
||||
{
|
||||
input: histogram.GaugeType,
|
||||
expected: prompb.Histogram_GAUGE,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
h := exampleFloatHistogram()
|
||||
h.CounterResetHint = test.input
|
||||
p := exampleFloatHistogramProto()
|
||||
p.ResetHint = test.expected
|
||||
|
||||
require.Equal(t, p, FloatHistogramToHistogramProto(1337, &h))
|
||||
|
||||
require.Equal(t, h, *FloatHistogramProtoToFloatHistogram(p))
|
||||
}
|
||||
actual, err := DecodeWriteV2Request(bytes.NewReader(buf))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, writeV2RequestFixture, actual)
|
||||
}
|
||||
|
||||
func TestStreamResponse(t *testing.T) {
|
||||
lbs1 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
|
||||
lbs2 := LabelsToLabelsProto(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
|
||||
lbs1 := prompb.FromLabels(labels.FromStrings("instance", "localhost1", "job", "demo1"), nil)
|
||||
lbs2 := prompb.FromLabels(labels.FromStrings("instance", "localhost2", "job", "demo2"), nil)
|
||||
chunk := prompb.Chunk{
|
||||
Type: prompb.Chunk_XOR,
|
||||
Data: make([]byte, 100),
|
||||
|
@ -802,7 +628,7 @@ func (c *mockChunkSeriesSet) Next() bool {
|
|||
|
||||
func (c *mockChunkSeriesSet) At() storage.ChunkSeries {
|
||||
return &storage.ChunkSeriesEntry{
|
||||
Lset: LabelProtosToLabels(&c.builder, c.chunkedSeries[c.index].Labels),
|
||||
Lset: c.chunkedSeries[c.index].ToLabels(&c.builder, nil),
|
||||
ChunkIteratorFn: func(chunks.Iterator) chunks.Iterator {
|
||||
return &mockChunkIterator{
|
||||
chunks: c.chunkedSeries[c.index].Chunks,
|
||||
|
|
|
@ -27,7 +27,7 @@ import (
|
|||
|
||||
// MetadataAppender is an interface used by the Metadata Watcher to send metadata, which is read from the scrape manager, on to somewhere else.
|
||||
type MetadataAppender interface {
|
||||
AppendMetadata(context.Context, []scrape.MetricMetadata)
|
||||
AppendWatcherMetadata(context.Context, []scrape.MetricMetadata)
|
||||
}
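// Illustrative sketch, not part of this change: the smallest possible
// MetadataAppender, roughly what the test mock below does. The type name is an
// assumption for the example.
type exampleMetadataSink struct{ seen int }

// AppendWatcherMetadata satisfies MetadataAppender; a real implementation would
// forward the batch to remote storage instead of counting it.
func (s *exampleMetadataSink) AppendWatcherMetadata(_ context.Context, m []scrape.MetricMetadata) {
	s.seen += len(m)
}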
|
||||
|
||||
// Watchable represents from where we fetch active targets for metadata.
|
||||
|
@ -146,7 +146,7 @@ func (mw *MetadataWatcher) collect() {
|
|||
}
|
||||
|
||||
// Blocks until the metadata is sent to the remote write endpoint or hardShutdownContext is expired.
|
||||
mw.writer.AppendMetadata(mw.hardShutdownCtx, metadata)
|
||||
mw.writer.AppendWatcherMetadata(mw.hardShutdownCtx, metadata)
|
||||
}
|
||||
|
||||
func (mw *MetadataWatcher) ready() bool {
|
||||
|
|
|
@ -57,7 +57,7 @@ type writeMetadataToMock struct {
|
|||
metadataAppended int
|
||||
}
|
||||
|
||||
func (mwtm *writeMetadataToMock) AppendMetadata(_ context.Context, m []scrape.MetricMetadata) {
|
||||
func (mwtm *writeMetadataToMock) AppendWatcherMetadata(_ context.Context, m []scrape.MetricMetadata) {
|
||||
mwtm.metadataAppended += len(m)
|
||||
}
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@ import (
|
|||
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
|
||||
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
|
||||
var unitMap = map[string]string{
|
||||
|
||||
// Time
|
||||
"d": "days",
|
||||
"h": "hours",
|
||||
|
@ -111,7 +110,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
|
|||
|
||||
// Build a normalized name for the specified metric
|
||||
func normalizeName(metric pmetric.Metric, namespace string) string {
|
||||
|
||||
// Split metric name in "tokens" (remove all non-alphanumeric)
|
||||
nameTokens := strings.FieldsFunc(
|
||||
metric.Name(),
|
||||
|
|
|
@ -19,7 +19,6 @@ package prometheus
|
|||
import "strings"
|
||||
|
||||
var wordToUCUM = map[string]string{
|
||||
|
||||
// Time
|
||||
"days": "d",
|
||||
"hours": "h",
|
||||
|
|
|
@ -182,12 +182,13 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa
|
|||
if i+1 >= len(extras) {
|
||||
break
|
||||
}
|
||||
_, found := l[extras[i]]
|
||||
|
||||
name := extras[i]
|
||||
_, found := l[name]
|
||||
if found && logOnOverwrite {
|
||||
log.Println("label " + extras[i] + " is overwritten. Check if Prometheus reserved labels are used.")
|
||||
log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
|
||||
}
|
||||
// internal labels should be maintained
|
||||
name := extras[i]
|
||||
if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
|
||||
name = prometheustranslator.NormalizeLabel(name)
|
||||
}
|
||||
|
@ -219,6 +220,13 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// addHistogramDataPoints adds OTel histogram data points to the corresponding Prometheus time series
|
||||
// as classical histogram samples.
|
||||
//
|
||||
// Note that we can't convert to native histograms, since these have exponential buckets and don't line up
|
||||
// with the user defined bucket boundaries of non-exponential OTel histograms.
|
||||
// However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets:
|
||||
// https://github.com/prometheus/prometheus/issues/13485.
|
||||
func (c *PrometheusConverter) addHistogramDataPoints(dataPoints pmetric.HistogramDataPointSlice,
|
||||
resource pcommon.Resource, settings Settings, baseName string) {
|
||||
for x := 0; x < dataPoints.Len(); x++ {
|
||||
|
|
|
@ -30,10 +30,18 @@ import (
|
|||
|
||||
const defaultZeroThreshold = 1e-128
|
||||
|
||||
// addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series
|
||||
// as native histogram samples.
|
||||
func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice,
|
||||
resource pcommon.Resource, settings Settings, baseName string) error {
|
||||
resource pcommon.Resource, settings Settings, promName string) error {
|
||||
for x := 0; x < dataPoints.Len(); x++ {
|
||||
pt := dataPoints.At(x)
|
||||
|
||||
histogram, err := exponentialToNativeHistogram(pt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lbls := createAttributes(
|
||||
resource,
|
||||
pt.Attributes(),
|
||||
|
@ -41,14 +49,9 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
|
|||
nil,
|
||||
true,
|
||||
model.MetricNameLabel,
|
||||
baseName,
|
||||
promName,
|
||||
)
|
||||
ts, _ := c.getOrCreateTimeSeries(lbls)
|
||||
|
||||
histogram, err := exponentialToNativeHistogram(pt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ts.Histograms = append(ts.Histograms, histogram)
|
||||
|
||||
exemplars := getPromExemplars[pmetric.ExponentialHistogramDataPoint](pt)
|
||||
|
@ -58,7 +61,7 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr
|
|||
return nil
|
||||
}
|
||||
|
||||
// exponentialToNativeHistogram translates OTel Exponential Histogram data point
|
||||
// exponentialToNativeHistogram translates OTel Exponential Histogram data point
|
||||
// to Prometheus Native Histogram.
|
||||
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) {
|
||||
scale := p.Scale()
|
||||
|
|
|
@ -36,9 +36,11 @@ import (
|
|||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/model/relabel"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
|
||||
"github.com/prometheus/prometheus/scrape"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
"github.com/prometheus/prometheus/tsdb/record"
|
||||
|
@ -389,7 +391,7 @@ func (m *queueManagerMetrics) unregister() {
|
|||
// external timeseries database.
|
||||
type WriteClient interface {
|
||||
// Store stores the given samples in the remote storage.
|
||||
Store(context.Context, []byte, int) error
|
||||
Store(ctx context.Context, req []byte, retryAttempt int) error
|
||||
// Name uniquely identifies the remote storage.
|
||||
Name() string
|
||||
// Endpoint is the remote read or write endpoint for the storage client.
|
||||
|
@ -418,11 +420,14 @@ type QueueManager struct {
|
|||
|
||||
clientMtx sync.RWMutex
|
||||
storeClient WriteClient
|
||||
protoMsg config.RemoteWriteProtoMsg
|
||||
enc Compression
|
||||
|
||||
seriesMtx sync.Mutex // Covers seriesLabels, droppedSeries and builder.
|
||||
seriesLabels map[chunks.HeadSeriesRef]labels.Labels
|
||||
droppedSeries map[chunks.HeadSeriesRef]struct{}
|
||||
builder *labels.Builder
|
||||
seriesMtx sync.Mutex // Covers seriesLabels, seriesMetadata, droppedSeries and builder.
|
||||
seriesLabels map[chunks.HeadSeriesRef]labels.Labels
|
||||
seriesMetadata map[chunks.HeadSeriesRef]*metadata.Metadata
|
||||
droppedSeries map[chunks.HeadSeriesRef]struct{}
|
||||
builder *labels.Builder
|
||||
|
||||
seriesSegmentMtx sync.Mutex // Covers seriesSegmentIndexes - if you also lock seriesMtx, take seriesMtx first.
|
||||
seriesSegmentIndexes map[chunks.HeadSeriesRef]int
|
||||
|
@ -463,6 +468,7 @@ func NewQueueManager(
|
|||
sm ReadyScrapeManager,
|
||||
enableExemplarRemoteWrite bool,
|
||||
enableNativeHistogramRemoteWrite bool,
|
||||
protoMsg config.RemoteWriteProtoMsg,
|
||||
) *QueueManager {
|
||||
if logger == nil {
|
||||
logger = log.NewNopLogger()
|
||||
|
@ -487,6 +493,7 @@ func NewQueueManager(
|
|||
sendNativeHistograms: enableNativeHistogramRemoteWrite,
|
||||
|
||||
seriesLabels: make(map[chunks.HeadSeriesRef]labels.Labels),
|
||||
seriesMetadata: make(map[chunks.HeadSeriesRef]*metadata.Metadata),
|
||||
seriesSegmentIndexes: make(map[chunks.HeadSeriesRef]int),
|
||||
droppedSeries: make(map[chunks.HeadSeriesRef]struct{}),
|
||||
builder: labels.NewBuilder(labels.EmptyLabels()),
|
||||
|
@ -503,9 +510,26 @@ func NewQueueManager(
|
|||
metrics: metrics,
|
||||
interner: interner,
|
||||
highestRecvTimestamp: highestRecvTimestamp,
|
||||
|
||||
protoMsg: protoMsg,
|
||||
enc: SnappyBlockCompression, // Hardcoded for now, but scaffolding exists for likely future use.
|
||||
}
|
||||
|
||||
walMetadata := false
|
||||
if t.protoMsg != config.RemoteWriteProtoMsgV1 {
|
||||
walMetadata = true
|
||||
}
|
||||
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
|
||||
|
||||
// The current MetadataWatcher implementation is mutually exclusive
|
||||
// with the new approach, which stores metadata as WAL records and
|
||||
// ships them alongside series. If both mechanisms are set, the new one
|
||||
// takes precedence by implicitly disabling the older one.
|
||||
if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 {
|
||||
level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request")
|
||||
t.mcfg.Send = false
|
||||
}
|
||||
|
||||
t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite)
|
||||
if t.mcfg.Send {
|
||||
t.metadataWatcher = NewMetadataWatcher(logger, sm, client.Name(), t, t.mcfg.SendInterval, flushDeadline)
|
||||
}
|
||||
|
@ -514,14 +538,21 @@ func NewQueueManager(
|
|||
return t
|
||||
}
|
||||
|
||||
// AppendMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
|
||||
func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
|
||||
// AppendWatcherMetadata sends metadata to the remote storage. Metadata is sent in batches, but is not parallelized.
|
||||
// This is only used for the metadata_config.send setting and 1.x Remote Write.
|
||||
func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scrape.MetricMetadata) {
|
||||
// no op for any newer proto format, which will cache metadata sent to it from the WAL watcher.
|
||||
if t.protoMsg != config.RemoteWriteProtoMsgV1 {
|
||||
return
|
||||
}
|
||||
|
||||
// 1.X will still get metadata in batches.
|
||||
mm := make([]prompb.MetricMetadata, 0, len(metadata))
|
||||
for _, entry := range metadata {
|
||||
mm = append(mm, prompb.MetricMetadata{
|
||||
MetricFamilyName: entry.Metric,
|
||||
Help: entry.Help,
|
||||
Type: metricTypeToMetricTypeProto(entry.Type),
|
||||
Type: prompb.FromMetadataType(entry.Type),
|
||||
Unit: entry.Unit,
|
||||
})
|
||||
}
|
||||
|
@ -542,8 +573,8 @@ func (t *QueueManager) AppendMetadata(ctx context.Context, metadata []scrape.Met
|
|||
}
|
||||
|
||||
func (t *QueueManager) sendMetadataWithBackoff(ctx context.Context, metadata []prompb.MetricMetadata, pBuf *proto.Buffer) error {
|
||||
// Build the WriteRequest with no samples.
|
||||
req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil)
|
||||
// Build the WriteRequest with no samples (v1 flow).
|
||||
req, _, _, err := buildWriteRequest(t.logger, nil, metadata, pBuf, nil, nil, t.enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -629,6 +660,36 @@ func isTimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sam
|
|||
}
|
||||
}
|
||||
|
||||
func isV2TimeSeriesOldFilter(metrics *queueManagerMetrics, baseTime time.Time, sampleAgeLimit time.Duration) func(ts writev2.TimeSeries) bool {
|
||||
return func(ts writev2.TimeSeries) bool {
|
||||
if sampleAgeLimit == 0 {
|
||||
// If sampleAgeLimit is unset, then we never skip samples due to their age.
|
||||
return false
|
||||
}
|
||||
switch {
|
||||
// Only the first element should be set in the series, therefore we only check the first element.
|
||||
case len(ts.Samples) > 0:
|
||||
if isSampleOld(baseTime, sampleAgeLimit, ts.Samples[0].Timestamp) {
|
||||
metrics.droppedSamplesTotal.WithLabelValues(reasonTooOld).Inc()
|
||||
return true
|
||||
}
|
||||
case len(ts.Histograms) > 0:
|
||||
if isSampleOld(baseTime, sampleAgeLimit, ts.Histograms[0].Timestamp) {
|
||||
metrics.droppedHistogramsTotal.WithLabelValues(reasonTooOld).Inc()
|
||||
return true
|
||||
}
|
||||
case len(ts.Exemplars) > 0:
|
||||
if isSampleOld(baseTime, sampleAgeLimit, ts.Exemplars[0].Timestamp) {
|
||||
metrics.droppedExemplarsTotal.WithLabelValues(reasonTooOld).Inc()
|
||||
return true
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
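// Illustrative sketch, not part of this change (the free-standing helper and its
// parameters are assumptions): how the filter above is combined with
// buildV2WriteRequest when a retry re-encodes a batch, mirroring
// sendV2SamplesWithBackoff further below.
func exampleRebuildV2Request(qm *QueueManager, samples []writev2.TimeSeries, symbols []string, pBuf, buf *[]byte) ([]byte, error) {
	filter := isV2TimeSeriesOldFilter(qm.metrics, time.Now(), time.Duration(qm.cfg.SampleAgeLimit))
	req, _, lowest, err := buildV2WriteRequest(qm.logger, samples, symbols, pBuf, buf, filter, qm.enc)
	qm.buildRequestLimitTimestamp.Store(lowest)
	return req, err
}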
|
||||
|
||||
// Append queues a sample to be sent to the remote storage. Blocks until all samples are
|
||||
// enqueued on their shards or a shutdown signal is received.
|
||||
func (t *QueueManager) Append(samples []record.RefSample) bool {
|
||||
|
@ -652,6 +713,9 @@ outer:
|
|||
t.seriesMtx.Unlock()
|
||||
continue
|
||||
}
|
||||
// TODO(cstyan): Handle or at least log an error if no metadata is found.
|
||||
// See https://github.com/prometheus/prometheus/issues/14405
|
||||
meta := t.seriesMetadata[s.Ref]
|
||||
t.seriesMtx.Unlock()
|
||||
// Start with a very small backoff. This should not be t.cfg.MinBackoff
|
||||
// as it can happen without errors, and we want to pickup work after
|
||||
|
@ -666,6 +730,7 @@ outer:
|
|||
}
|
||||
if t.shards.enqueue(s.Ref, timeSeries{
|
||||
seriesLabels: lbls,
|
||||
metadata: meta,
|
||||
timestamp: s.T,
|
||||
value: s.V,
|
||||
sType: tSample,
|
||||
|
@ -711,6 +776,7 @@ outer:
|
|||
t.seriesMtx.Unlock()
|
||||
continue
|
||||
}
|
||||
meta := t.seriesMetadata[e.Ref]
|
||||
t.seriesMtx.Unlock()
|
||||
// This will only loop if the queues are being resharded.
|
||||
backoff := t.cfg.MinBackoff
|
||||
|
@ -722,6 +788,7 @@ outer:
|
|||
}
|
||||
if t.shards.enqueue(e.Ref, timeSeries{
|
||||
seriesLabels: lbls,
|
||||
metadata: meta,
|
||||
timestamp: e.T,
|
||||
value: e.V,
|
||||
exemplarLabels: e.Labels,
|
||||
|
@ -765,6 +832,7 @@ outer:
|
|||
t.seriesMtx.Unlock()
|
||||
continue
|
||||
}
|
||||
meta := t.seriesMetadata[h.Ref]
|
||||
t.seriesMtx.Unlock()
|
||||
|
||||
backoff := model.Duration(5 * time.Millisecond)
|
||||
|
@ -776,6 +844,7 @@ outer:
|
|||
}
|
||||
if t.shards.enqueue(h.Ref, timeSeries{
|
||||
seriesLabels: lbls,
|
||||
metadata: meta,
|
||||
timestamp: h.T,
|
||||
histogram: h.H,
|
||||
sType: tHistogram,
|
||||
|
@ -818,6 +887,7 @@ outer:
|
|||
t.seriesMtx.Unlock()
|
||||
continue
|
||||
}
|
||||
meta := t.seriesMetadata[h.Ref]
|
||||
t.seriesMtx.Unlock()
|
||||
|
||||
backoff := model.Duration(5 * time.Millisecond)
|
||||
|
@ -829,6 +899,7 @@ outer:
|
|||
}
|
||||
if t.shards.enqueue(h.Ref, timeSeries{
|
||||
seriesLabels: lbls,
|
||||
metadata: meta,
|
||||
timestamp: h.T,
|
||||
floatHistogram: h.FH,
|
||||
sType: tFloatHistogram,
|
||||
|
@ -925,6 +996,23 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) {
|
|||
}
|
||||
}
|
||||
|
||||
// StoreMetadata keeps track of known series' metadata for lookups when sending samples to remote.
|
||||
func (t *QueueManager) StoreMetadata(meta []record.RefMetadata) {
|
||||
if t.protoMsg == config.RemoteWriteProtoMsgV1 {
|
||||
return
|
||||
}
|
||||
|
||||
t.seriesMtx.Lock()
|
||||
defer t.seriesMtx.Unlock()
|
||||
for _, m := range meta {
|
||||
t.seriesMetadata[m.Ref] = &metadata.Metadata{
|
||||
Type: record.ToMetricType(m.Type),
|
||||
Unit: m.Unit,
|
||||
Help: m.Help,
|
||||
}
|
||||
}
|
||||
}
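// Illustrative sketch, not part of this change (helper name is an assumption):
// the cached entry is later looked up by series ref when samples are enqueued,
// exactly as Append does with `meta := t.seriesMetadata[s.Ref]`. The cache is
// only populated for remote write v2, since v1 ignores per-series metadata.
func exampleMetadataLookup(t *QueueManager, ref chunks.HeadSeriesRef) *metadata.Metadata {
	t.seriesMtx.Lock()
	defer t.seriesMtx.Unlock()
	return t.seriesMetadata[ref]
}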
|
||||
|
||||
// UpdateSeriesSegment updates the segment number held against the series,
|
||||
// so we can trim older ones in SeriesReset.
|
||||
func (t *QueueManager) UpdateSeriesSegment(series []record.RefSeries, index int) {
|
||||
|
@ -950,6 +1038,7 @@ func (t *QueueManager) SeriesReset(index int) {
|
|||
delete(t.seriesSegmentIndexes, k)
|
||||
t.releaseLabels(t.seriesLabels[k])
|
||||
delete(t.seriesLabels, k)
|
||||
delete(t.seriesMetadata, k)
|
||||
delete(t.droppedSeries, k)
|
||||
}
|
||||
}
|
||||
|
@ -1165,6 +1254,7 @@ type shards struct {
|
|||
samplesDroppedOnHardShutdown atomic.Uint32
|
||||
exemplarsDroppedOnHardShutdown atomic.Uint32
|
||||
histogramsDroppedOnHardShutdown atomic.Uint32
|
||||
metadataDroppedOnHardShutdown atomic.Uint32
|
||||
}
|
||||
|
||||
// start the shards; must be called before any call to enqueue.
|
||||
|
@ -1193,6 +1283,7 @@ func (s *shards) start(n int) {
|
|||
s.samplesDroppedOnHardShutdown.Store(0)
|
||||
s.exemplarsDroppedOnHardShutdown.Store(0)
|
||||
s.histogramsDroppedOnHardShutdown.Store(0)
|
||||
s.metadataDroppedOnHardShutdown.Store(0)
|
||||
for i := 0; i < n; i++ {
|
||||
go s.runShard(hardShutdownCtx, i, newQueues[i])
|
||||
}
|
||||
|
@ -1245,7 +1336,6 @@ func (s *shards) stop() {
|
|||
func (s *shards) enqueue(ref chunks.HeadSeriesRef, data timeSeries) bool {
|
||||
s.mtx.RLock()
|
||||
defer s.mtx.RUnlock()
|
||||
|
||||
shard := uint64(ref) % uint64(len(s.queues))
|
||||
select {
|
||||
case <-s.softShutdown:
|
||||
|
@ -1288,6 +1378,7 @@ type timeSeries struct {
|
|||
value float64
|
||||
histogram *histogram.Histogram
|
||||
floatHistogram *histogram.FloatHistogram
|
||||
metadata *metadata.Metadata
|
||||
timestamp int64
|
||||
exemplarLabels labels.Labels
|
||||
// The type of series: sample, exemplar, or histogram.
|
||||
|
@ -1301,6 +1392,7 @@ const (
|
|||
tExemplar
|
||||
tHistogram
|
||||
tFloatHistogram
|
||||
tMetadata
|
||||
)
|
||||
|
||||
func newQueue(batchSize, capacity int) *queue {
|
||||
|
@ -1324,6 +1416,10 @@ func newQueue(batchSize, capacity int) *queue {
|
|||
func (q *queue) Append(datum timeSeries) bool {
|
||||
q.batchMtx.Lock()
|
||||
defer q.batchMtx.Unlock()
|
||||
// TODO(cstyan): Check if metadata now means we've reduced the total # of samples
|
||||
// we can batch together here, and if so find a way to not include metadata
|
||||
// in the batch size calculation.
|
||||
// See https://github.com/prometheus/prometheus/issues/14405
|
||||
q.batch = append(q.batch, datum)
|
||||
if len(q.batch) == cap(q.batch) {
|
||||
select {
|
||||
|
@ -1347,7 +1443,6 @@ func (q *queue) Chan() <-chan []timeSeries {
|
|||
func (q *queue) Batch() []timeSeries {
|
||||
q.batchMtx.Lock()
|
||||
defer q.batchMtx.Unlock()
|
||||
|
||||
select {
|
||||
case batch := <-q.batchQueue:
|
||||
return batch
|
||||
|
@ -1419,19 +1514,23 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
|||
}()
|
||||
|
||||
shardNum := strconv.Itoa(shardID)
|
||||
symbolTable := writev2.NewSymbolTable()
|
||||
|
||||
// Send batches of at most MaxSamplesPerSend samples to the remote storage.
|
||||
// If we have fewer samples than that, flush them out after a deadline anyways.
|
||||
var (
|
||||
max = s.qm.cfg.MaxSamplesPerSend
|
||||
|
||||
pBuf = proto.NewBuffer(nil)
|
||||
buf []byte
|
||||
pBuf = proto.NewBuffer(nil)
|
||||
pBufRaw []byte
|
||||
buf []byte
|
||||
)
|
||||
// TODO(@tpaschalis) Should we also raise the max if we have WAL metadata?
|
||||
if s.qm.sendExemplars {
|
||||
max += int(float64(max) * 0.1)
|
||||
}
|
||||
|
||||
// TODO: Dry all of this, we should make an interface/generic for the timeseries type.
|
||||
batchQueue := queue.Chan()
|
||||
pendingData := make([]prompb.TimeSeries, max)
|
||||
for i := range pendingData {
|
||||
|
@ -1440,6 +1539,10 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
|||
pendingData[i].Exemplars = []prompb.Exemplar{{}}
|
||||
}
|
||||
}
|
||||
pendingDataV2 := make([]writev2.TimeSeries, max)
|
||||
for i := range pendingDataV2 {
|
||||
pendingDataV2[i].Samples = []writev2.Sample{{}}
|
||||
}
|
||||
|
||||
timer := time.NewTimer(time.Duration(s.qm.cfg.BatchSendDeadline))
|
||||
stop := func() {
|
||||
|
@ -1452,6 +1555,24 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
|||
}
|
||||
defer stop()
|
||||
|
||||
sendBatch := func(batch []timeSeries, protoMsg config.RemoteWriteProtoMsg, enc Compression, timer bool) {
|
||||
switch protoMsg {
|
||||
case config.RemoteWriteProtoMsgV1:
|
||||
nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms)
|
||||
n := nPendingSamples + nPendingExemplars + nPendingHistograms
|
||||
if timer {
|
||||
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
|
||||
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
|
||||
}
|
||||
_ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc)
|
||||
case config.RemoteWriteProtoMsgV2:
|
||||
nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata := populateV2TimeSeries(&symbolTable, batch, pendingDataV2, s.qm.sendExemplars, s.qm.sendNativeHistograms)
|
||||
n := nPendingSamples + nPendingExemplars + nPendingHistograms
|
||||
_ = s.sendV2Samples(ctx, pendingDataV2[:n], symbolTable.Symbols(), nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata, &pBufRaw, &buf, enc)
|
||||
symbolTable.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
@ -1475,10 +1596,11 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
|||
if !ok {
|
||||
return
|
||||
}
|
||||
nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
|
||||
|
||||
sendBatch(batch, s.qm.protoMsg, s.qm.enc, false)
|
||||
// TODO(bwplotka): Previously the return was between populate and send.
|
||||
// Consider this when DRY-ing https://github.com/prometheus/prometheus/issues/14409
|
||||
queue.ReturnForReuse(batch)
|
||||
n := nPendingSamples + nPendingExemplars + nPendingHistograms
|
||||
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
|
||||
|
||||
stop()
|
||||
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
|
||||
|
@ -1486,11 +1608,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
|||
case <-timer.C:
|
||||
batch := queue.Batch()
|
||||
if len(batch) > 0 {
|
||||
nPendingSamples, nPendingExemplars, nPendingHistograms := s.populateTimeSeries(batch, pendingData)
|
||||
n := nPendingSamples + nPendingExemplars + nPendingHistograms
|
||||
level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples,
|
||||
"exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms)
|
||||
s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf)
|
||||
sendBatch(batch, s.qm.protoMsg, s.qm.enc, true)
|
||||
}
|
||||
queue.ReturnForReuse(batch)
|
||||
timer.Reset(time.Duration(s.qm.cfg.BatchSendDeadline))
|
||||
|
@ -1498,21 +1616,22 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries) (int, int, int) {
|
||||
func populateTimeSeries(batch []timeSeries, pendingData []prompb.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int) {
|
||||
var nPendingSamples, nPendingExemplars, nPendingHistograms int
|
||||
for nPending, d := range batch {
|
||||
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
|
||||
if s.qm.sendExemplars {
|
||||
if sendExemplars {
|
||||
pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
|
||||
}
|
||||
if s.qm.sendNativeHistograms {
|
||||
if sendNativeHistograms {
|
||||
pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
|
||||
}
|
||||
|
||||
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
|
||||
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
|
||||
// stop reading from the queue. This makes it safe to reference pendingSamples by index.
|
||||
pendingData[nPending].Labels = LabelsToLabelsProto(d.seriesLabels, pendingData[nPending].Labels)
|
||||
pendingData[nPending].Labels = prompb.FromLabels(d.seriesLabels, pendingData[nPending].Labels)
|
||||
|
||||
switch d.sType {
|
||||
case tSample:
|
||||
pendingData[nPending].Samples = append(pendingData[nPending].Samples, prompb.Sample{
|
||||
|
@ -1522,25 +1641,39 @@ func (s *shards) populateTimeSeries(batch []timeSeries, pendingData []prompb.Tim
|
|||
nPendingSamples++
|
||||
case tExemplar:
|
||||
pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, prompb.Exemplar{
|
||||
Labels: LabelsToLabelsProto(d.exemplarLabels, nil),
|
||||
Labels: prompb.FromLabels(d.exemplarLabels, nil),
|
||||
Value: d.value,
|
||||
Timestamp: d.timestamp,
|
||||
})
|
||||
nPendingExemplars++
|
||||
case tHistogram:
|
||||
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, HistogramToHistogramProto(d.timestamp, d.histogram))
|
||||
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromIntHistogram(d.timestamp, d.histogram))
|
||||
nPendingHistograms++
|
||||
case tFloatHistogram:
|
||||
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, FloatHistogramToHistogramProto(d.timestamp, d.floatHistogram))
|
||||
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, prompb.FromFloatHistogram(d.timestamp, d.floatHistogram))
|
||||
nPendingHistograms++
|
||||
}
|
||||
}
|
||||
return nPendingSamples, nPendingExemplars, nPendingHistograms
|
||||
}
|
||||
|
||||
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) {
|
||||
func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
|
||||
begin := time.Now()
|
||||
err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, pBuf, buf)
|
||||
err := s.sendSamplesWithBackoff(ctx, samples, sampleCount, exemplarCount, histogramCount, 0, pBuf, buf, enc)
|
||||
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, 0, time.Since(begin))
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(bwplotka): DRY this (have one logic for both v1 and v2).
|
||||
// See https://github.com/prometheus/prometheus/issues/14409
|
||||
func (s *shards) sendV2Samples(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
|
||||
begin := time.Now()
|
||||
err := s.sendV2SamplesWithBackoff(ctx, samples, labels, sampleCount, exemplarCount, histogramCount, metadataCount, pBuf, buf, enc)
|
||||
s.updateMetrics(ctx, err, sampleCount, exemplarCount, histogramCount, metadataCount, time.Since(begin))
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exemplarCount, histogramCount, metadataCount int, duration time.Duration) {
|
||||
if err != nil {
|
||||
level.Error(s.qm.logger).Log("msg", "non-recoverable error", "count", sampleCount, "exemplarCount", exemplarCount, "histogramCount", histogramCount, "err", err)
|
||||
s.qm.metrics.failedSamplesTotal.Add(float64(sampleCount))
|
||||
|
@ -1550,8 +1683,8 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
|
|||
|
||||
// These counters are used to calculate the dynamic sharding, and as such
|
||||
// should be maintained irrespective of success or failure.
|
||||
s.qm.dataOut.incr(int64(len(samples)))
|
||||
s.qm.dataOutDuration.incr(int64(time.Since(begin)))
|
||||
s.qm.dataOut.incr(int64(sampleCount + exemplarCount + histogramCount + metadataCount))
|
||||
s.qm.dataOutDuration.incr(int64(duration))
|
||||
s.qm.lastSendTimestamp.Store(time.Now().Unix())
|
||||
// Pending samples/exemplars/histograms also should be subtracted, as an error means
|
||||
// they will not be retried.
|
||||
|
@ -1564,9 +1697,9 @@ func (s *shards) sendSamples(ctx context.Context, samples []prompb.TimeSeries, s
|
|||
}
|
||||
|
||||
// sendSamples to the remote storage with backoff for recoverable errors.
|
||||
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount int, pBuf *proto.Buffer, buf *[]byte) error {
|
||||
func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.TimeSeries, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf *proto.Buffer, buf *[]byte, enc Compression) error {
|
||||
// Build the WriteRequest with no metadata.
|
||||
req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, *buf, nil)
|
||||
req, highest, lowest, err := buildWriteRequest(s.qm.logger, samples, nil, pBuf, buf, nil, enc)
|
||||
s.qm.buildRequestLimitTimestamp.Store(lowest)
|
||||
if err != nil {
|
||||
// Failing to build the write request is non-recoverable, since it will
|
||||
|
@ -1590,8 +1723,9 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
|
|||
samples,
|
||||
nil,
|
||||
pBuf,
|
||||
*buf,
|
||||
buf,
|
||||
isTimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
|
||||
enc,
|
||||
)
|
||||
s.qm.buildRequestLimitTimestamp.Store(lowest)
|
||||
if err != nil {
|
||||
|
@ -1622,6 +1756,7 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
|
|||
s.qm.metrics.samplesTotal.Add(float64(sampleCount))
|
||||
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
|
||||
s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
|
||||
s.qm.metrics.metadataTotal.Add(float64(metadataCount))
|
||||
err := s.qm.client().Store(ctx, *buf, try)
|
||||
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
|
||||
|
||||
|
@ -1652,6 +1787,148 @@ func (s *shards) sendSamplesWithBackoff(ctx context.Context, samples []prompb.Ti
|
|||
return err
|
||||
}
|
||||
|
||||
// sendV2Samples to the remote storage with backoff for recoverable errors.
|
||||
func (s *shards) sendV2SamplesWithBackoff(ctx context.Context, samples []writev2.TimeSeries, labels []string, sampleCount, exemplarCount, histogramCount, metadataCount int, pBuf, buf *[]byte, enc Compression) error {
|
||||
// Build the WriteRequest with no metadata.
|
||||
req, highest, lowest, err := buildV2WriteRequest(s.qm.logger, samples, labels, pBuf, buf, nil, enc)
|
||||
s.qm.buildRequestLimitTimestamp.Store(lowest)
|
||||
if err != nil {
|
||||
// Failing to build the write request is non-recoverable, since it will
|
||||
// only error if marshaling the proto to bytes fails.
|
||||
return err
|
||||
}
|
||||
|
||||
reqSize := len(req)
|
||||
*buf = req
|
||||
|
||||
// An anonymous function allows us to defer the completion of our per-try spans
|
||||
// without causing a memory leak, and it has the nice effect of not propagating any
|
||||
// parameters for sendV2SamplesWithBackoff/3.
|
||||
attemptStore := func(try int) error {
|
||||
currentTime := time.Now()
|
||||
lowest := s.qm.buildRequestLimitTimestamp.Load()
|
||||
if isSampleOld(currentTime, time.Duration(s.qm.cfg.SampleAgeLimit), lowest) {
|
||||
// This will filter out old samples during retries.
|
||||
req, _, lowest, err := buildV2WriteRequest(
|
||||
s.qm.logger,
|
||||
samples,
|
||||
labels,
|
||||
pBuf,
|
||||
buf,
|
||||
isV2TimeSeriesOldFilter(s.qm.metrics, currentTime, time.Duration(s.qm.cfg.SampleAgeLimit)),
|
||||
enc,
|
||||
)
|
||||
s.qm.buildRequestLimitTimestamp.Store(lowest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*buf = req
|
||||
}
|
||||
|
||||
ctx, span := otel.Tracer("").Start(ctx, "Remote Send Batch")
|
||||
defer span.End()
|
||||
|
||||
span.SetAttributes(
|
||||
attribute.Int("request_size", reqSize),
|
||||
attribute.Int("samples", sampleCount),
|
||||
attribute.Int("try", try),
|
||||
attribute.String("remote_name", s.qm.storeClient.Name()),
|
||||
attribute.String("remote_url", s.qm.storeClient.Endpoint()),
|
||||
)
|
||||
|
||||
if exemplarCount > 0 {
|
||||
span.SetAttributes(attribute.Int("exemplars", exemplarCount))
|
||||
}
|
||||
if histogramCount > 0 {
|
||||
span.SetAttributes(attribute.Int("histograms", histogramCount))
|
||||
}
|
||||
|
||||
begin := time.Now()
|
||||
s.qm.metrics.samplesTotal.Add(float64(sampleCount))
|
||||
s.qm.metrics.exemplarsTotal.Add(float64(exemplarCount))
|
||||
s.qm.metrics.histogramsTotal.Add(float64(histogramCount))
|
||||
s.qm.metrics.metadataTotal.Add(float64(metadataCount))
|
||||
err := s.qm.client().Store(ctx, *buf, try)
|
||||
s.qm.metrics.sentBatchDuration.Observe(time.Since(begin).Seconds())
|
||||
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
onRetry := func() {
|
||||
s.qm.metrics.retriedSamplesTotal.Add(float64(sampleCount))
|
||||
s.qm.metrics.retriedExemplarsTotal.Add(float64(exemplarCount))
|
||||
s.qm.metrics.retriedHistogramsTotal.Add(float64(histogramCount))
|
||||
}
|
||||
|
||||
err = s.qm.sendWriteRequestWithBackoff(ctx, attemptStore, onRetry)
|
||||
if errors.Is(err, context.Canceled) {
|
||||
// When there is resharding, we cancel the context for this queue, which means the data is not sent.
|
||||
// So we exit early to not update the metrics.
|
||||
return err
|
||||
}
|
||||
|
||||
s.qm.metrics.sentBytesTotal.Add(float64(reqSize))
|
||||
s.qm.metrics.highestSentTimestamp.Set(float64(highest / 1000))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries, pendingData []writev2.TimeSeries, sendExemplars, sendNativeHistograms bool) (int, int, int, int) {
|
||||
var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int
|
||||
for nPending, d := range batch {
|
||||
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
|
||||
// todo: should we also safeguard against empty metadata here?
|
||||
if d.metadata != nil {
|
||||
pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type)
|
||||
pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
|
||||
pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit)
|
||||
nPendingMetadata++
|
||||
}
|
||||
|
||||
if sendExemplars {
|
||||
pendingData[nPending].Exemplars = pendingData[nPending].Exemplars[:0]
|
||||
}
|
||||
if sendNativeHistograms {
|
||||
pendingData[nPending].Histograms = pendingData[nPending].Histograms[:0]
|
||||
}
|
||||
|
||||
// Number of pending samples is limited by the fact that sendSamples (via sendSamplesWithBackoff)
|
||||
// retries endlessly, so once we reach max samples, if we can never send to the endpoint we'll
|
||||
// stop reading from the queue. This makes it safe to reference pendingSamples by index.
|
||||
pendingData[nPending].LabelsRefs = symbolTable.SymbolizeLabels(d.seriesLabels, pendingData[nPending].LabelsRefs)
|
||||
switch d.sType {
|
||||
case tSample:
|
||||
pendingData[nPending].Samples = append(pendingData[nPending].Samples, writev2.Sample{
|
||||
Value: d.value,
|
||||
Timestamp: d.timestamp,
|
||||
})
|
||||
nPendingSamples++
|
||||
case tExemplar:
|
||||
pendingData[nPending].Exemplars = append(pendingData[nPending].Exemplars, writev2.Exemplar{
|
||||
LabelsRefs: symbolTable.SymbolizeLabels(d.exemplarLabels, nil), // TODO: optimize, reuse slice
|
||||
Value: d.value,
|
||||
Timestamp: d.timestamp,
|
||||
})
|
||||
nPendingExemplars++
|
||||
case tHistogram:
|
||||
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromIntHistogram(d.timestamp, d.histogram))
|
||||
nPendingHistograms++
|
||||
case tFloatHistogram:
|
||||
pendingData[nPending].Histograms = append(pendingData[nPending].Histograms, writev2.FromFloatHistogram(d.timestamp, d.floatHistogram))
|
||||
nPendingHistograms++
|
||||
case tMetadata:
|
||||
// TODO: log or return an error?
|
||||
// we shouldn't receive metadata type data here, it should already be inserted into the timeSeries
|
||||
}
|
||||
}
|
||||
return nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata
|
||||
}
|
||||
|
||||
func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt func(int) error, onRetry func()) error {
|
||||
backoff := t.cfg.MinBackoff
|
||||
sleepDuration := model.Duration(0)
|
||||
|
@ -1795,7 +2072,21 @@ func buildTimeSeries(timeSeries []prompb.TimeSeries, filter func(prompb.TimeSeri
|
|||
return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
|
||||
}
|
||||
|
||||
func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf []byte, filter func(prompb.TimeSeries) bool) ([]byte, int64, int64, error) {
|
||||
func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed []byte, _ error) {
|
||||
switch enc {
|
||||
case SnappyBlockCompression:
|
||||
compressed = snappy.Encode(*tmpbuf, inp)
|
||||
if n := snappy.MaxEncodedLen(len(inp)); n > len(*tmpbuf) {
|
||||
// grow the buffer for the next time
|
||||
*tmpbuf = make([]byte, n)
|
||||
}
|
||||
return compressed, nil
|
||||
default:
|
||||
return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc)
|
||||
}
|
||||
}
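// Illustrative sketch, not part of this change (helper name is an assumption):
// pre-sizing the scratch buffer to snappy.MaxEncodedLen avoids the one-time grow
// that compressPayload performs after its first use.
func exampleCompress(payload []byte) ([]byte, error) {
	scratch := make([]byte, snappy.MaxEncodedLen(len(payload)))
	return compressPayload(&scratch, payload, SnappyBlockCompression)
}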
|
||||
|
||||
func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
|
||||
highest, lowest, timeSeries,
|
||||
droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter)
|
||||
|
||||
|
@ -1821,8 +2112,105 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada
|
|||
// snappy uses len() to see if it needs to allocate a new slice. Make the
|
||||
// buffer as long as possible.
|
||||
if buf != nil {
|
||||
buf = buf[0:cap(buf)]
|
||||
*buf = (*buf)[0:cap(*buf)]
|
||||
} else {
|
||||
buf = &[]byte{}
|
||||
}
|
||||
|
||||
compressed, err = compressPayload(buf, pBuf.Bytes(), enc)
|
||||
if err != nil {
|
||||
return nil, highest, lowest, err
|
||||
}
|
||||
compressed := snappy.Encode(buf, pBuf.Bytes())
|
||||
return compressed, highest, lowest, nil
|
||||
}
|
||||
|
||||
func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) {
|
||||
highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter)
|
||||
|
||||
if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 {
|
||||
level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms)
|
||||
}
|
||||
|
||||
req := &writev2.Request{
|
||||
Symbols: labels,
|
||||
Timeseries: timeSeries,
|
||||
}
|
||||
|
||||
if pBuf == nil {
|
||||
pBuf = &[]byte{} // For convenience in tests. Not efficient.
|
||||
}
|
||||
|
||||
data, err := req.OptimizedMarshal(*pBuf)
|
||||
if err != nil {
|
||||
return nil, highest, lowest, err
|
||||
}
|
||||
*pBuf = data
|
||||
|
||||
// snappy uses len() to see if it needs to allocate a new slice. Make the
|
||||
// buffer as long as possible.
|
||||
if buf != nil {
|
||||
*buf = (*buf)[0:cap(*buf)]
|
||||
} else {
|
||||
buf = &[]byte{}
|
||||
}
|
||||
|
||||
compressed, err = compressPayload(buf, data, enc)
|
||||
if err != nil {
|
||||
return nil, highest, lowest, err
|
||||
}
|
||||
return compressed, highest, lowest, nil
|
||||
}
|
||||
|
||||
func buildV2TimeSeries(timeSeries []writev2.TimeSeries, filter func(writev2.TimeSeries) bool) (int64, int64, []writev2.TimeSeries, int, int, int) {
|
||||
var highest int64
|
||||
var lowest int64
|
||||
var droppedSamples, droppedExemplars, droppedHistograms int
|
||||
|
||||
keepIdx := 0
|
||||
lowest = math.MaxInt64
|
||||
for i, ts := range timeSeries {
|
||||
if filter != nil && filter(ts) {
|
||||
if len(ts.Samples) > 0 {
|
||||
droppedSamples++
|
||||
}
|
||||
if len(ts.Exemplars) > 0 {
|
||||
droppedExemplars++
|
||||
}
|
||||
if len(ts.Histograms) > 0 {
|
||||
droppedHistograms++
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At the moment we only ever append a TimeSeries with a single sample or exemplar in it.
|
||||
if len(ts.Samples) > 0 && ts.Samples[0].Timestamp > highest {
|
||||
highest = ts.Samples[0].Timestamp
|
||||
}
|
||||
if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp > highest {
|
||||
highest = ts.Exemplars[0].Timestamp
|
||||
}
|
||||
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp > highest {
|
||||
highest = ts.Histograms[0].Timestamp
|
||||
}
|
||||
|
||||
// Get the lowest timestamp.
|
||||
if len(ts.Samples) > 0 && ts.Samples[0].Timestamp < lowest {
|
||||
lowest = ts.Samples[0].Timestamp
|
||||
}
|
||||
if len(ts.Exemplars) > 0 && ts.Exemplars[0].Timestamp < lowest {
|
||||
lowest = ts.Exemplars[0].Timestamp
|
||||
}
|
||||
if len(ts.Histograms) > 0 && ts.Histograms[0].Timestamp < lowest {
|
||||
lowest = ts.Histograms[0].Timestamp
|
||||
}
|
||||
if i != keepIdx {
|
||||
// We have to swap the kept timeseries with the one which should be dropped.
|
||||
// Copying any elements within timeSeries could cause data corruption when reusing the slice in a later batch (shards.populateTimeSeries).
|
||||
timeSeries[keepIdx], timeSeries[i] = timeSeries[i], timeSeries[keepIdx]
|
||||
}
|
||||
keepIdx++
|
||||
}
|
||||
|
||||
timeSeries = timeSeries[:keepIdx]
|
||||
return highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms
|
||||
}
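// Illustrative sketch, not part of this change (helper name and cutoff semantics
// are assumptions): dropping everything older than a cutoff with
// buildV2TimeSeries. Kept entries are swapped to the front of the slice rather
// than copied, so the caller can keep reusing the same backing array.
func exampleDropOldV2(series []writev2.TimeSeries, cutoff int64) []writev2.TimeSeries {
	_, _, kept, _, _, _ := buildV2TimeSeries(series, func(ts writev2.TimeSeries) bool {
		return len(ts.Samples) > 0 && ts.Samples[0].Timestamp < cutoff
	})
	return kept
}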
|
||||
|
|
|
@ -124,7 +124,7 @@ func TestSampledReadEndpoint(t *testing.T) {
|
|||
{Name: "d", Value: "e"},
|
||||
},
|
||||
Histograms: []prompb.Histogram{
|
||||
FloatHistogramToHistogramProto(0, tsdbutil.GenerateTestFloatHistogram(0)),
|
||||
prompb.FromFloatHistogram(0, tsdbutil.GenerateTestFloatHistogram(0)),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
|
|
@ -92,7 +92,7 @@ func TestNoDuplicateReadConfigs(t *testing.T) {
|
|||
|
||||
for _, tc := range cases {
|
||||
t.Run("", func(t *testing.T) {
|
||||
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
|
||||
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
RemoteReadConfigs: tc.cfgs,
|
||||
|
@ -172,12 +172,12 @@ func TestSeriesSetFilter(t *testing.T) {
|
|||
toRemove: []string{"foo"},
|
||||
in: &prompb.QueryResult{
|
||||
Timeseries: []*prompb.TimeSeries{
|
||||
{Labels: LabelsToLabelsProto(labels.FromStrings("foo", "bar", "a", "b"), nil)},
|
||||
{Labels: prompb.FromLabels(labels.FromStrings("foo", "bar", "a", "b"), nil)},
|
||||
},
|
||||
},
|
||||
expected: &prompb.QueryResult{
|
||||
Timeseries: []*prompb.TimeSeries{
|
||||
{Labels: LabelsToLabelsProto(labels.FromStrings("a", "b"), nil)},
|
||||
{Labels: prompb.FromLabels(labels.FromStrings("a", "b"), nil)},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -211,7 +211,7 @@ func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prom
|
|||
|
||||
q := &prompb.QueryResult{}
|
||||
for _, s := range c.store {
|
||||
l := LabelProtosToLabels(&c.b, s.Labels)
|
||||
l := s.ToLabels(&c.b, nil)
|
||||
var notMatch bool
|
||||
|
||||
for _, m := range matchers {
|
||||
|
|
|
@ -62,7 +62,7 @@ type Storage struct {
|
|||
}
|
||||
|
||||
// NewStorage returns a remote.Storage.
|
||||
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager) *Storage {
|
||||
func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage {
|
||||
if l == nil {
|
||||
l = log.NewNopLogger()
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal
|
|||
logger: logger,
|
||||
localStartTimeCallback: stCallback,
|
||||
}
|
||||
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm)
|
||||
s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL)
|
||||
return s
|
||||
}
|
||||
|
||||
|
|
|
@@ -29,7 +29,7 @@ import (
func TestStorageLifecycle(t *testing.T) {
dir := t.TempDir()

s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)
conf := &config.Config{
GlobalConfig: config.DefaultGlobalConfig,
RemoteWriteConfigs: []*config.RemoteWriteConfig{

@@ -56,7 +56,7 @@ func TestStorageLifecycle(t *testing.T) {
func TestUpdateRemoteReadConfigs(t *testing.T) {
dir := t.TempDir()

s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

conf := &config.Config{
GlobalConfig: config.GlobalConfig{},

@@ -77,7 +77,7 @@ func TestUpdateRemoteReadConfigs(t *testing.T) {
func TestFilterExternalLabels(t *testing.T) {
dir := t.TempDir()

s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

conf := &config.Config{
GlobalConfig: config.GlobalConfig{

@@ -102,7 +102,7 @@ func TestFilterExternalLabels(t *testing.T) {
func TestIgnoreExternalLabels(t *testing.T) {
dir := t.TempDir()

s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil)
s := NewStorage(nil, nil, nil, dir, defaultFlushDeadline, nil, false)

conf := &config.Config{
GlobalConfig: config.GlobalConfig{

@@ -154,7 +154,7 @@ func baseRemoteReadConfig(host string) *config.RemoteReadConfig {
// ApplyConfig runs concurrently with Notify
// See https://github.com/prometheus/prometheus/issues/12747
func TestWriteStorageApplyConfigsDuringCommit(t *testing.T) {
s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil)
s := NewStorage(nil, nil, nil, t.TempDir(), defaultFlushDeadline, nil, false)

var wg sync.WaitGroup
wg.Add(2000)
@@ -15,6 +15,7 @@ package remote

import (
"context"
"errors"
"fmt"
"math"
"sync"

@@ -65,6 +66,7 @@ type WriteStorage struct {
externalLabels labels.Labels
dir string
queues map[string]*QueueManager
metadataInWAL bool
samplesIn *ewmaRate
flushDeadline time.Duration
interner *pool

@@ -76,7 +78,7 @@ type WriteStorage struct {
}

// NewWriteStorage creates and runs a WriteStorage.
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager) *WriteStorage {
func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage {
if logger == nil {
logger = log.NewNopLogger()
}

@@ -92,6 +94,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
interner: newPool(),
scraper: sm,
quit: make(chan struct{}),
metadataInWAL: metadataInWal,
highestTimestamp: &maxTimestamp{
Gauge: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,

@@ -145,6 +148,9 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
newQueues := make(map[string]*QueueManager)
newHashes := []string{}
for _, rwConf := range conf.RemoteWriteConfigs {
if rwConf.ProtobufMessage == config.RemoteWriteProtoMsgV2 && !rws.metadataInWAL {
return errors.New("invalid remote write configuration, if you are using remote write version 2.0 the `--enable-feature=metadata-wal-records` feature flag must be enabled")
}
hash, err := toHash(rwConf)
if err != nil {
return err

@@ -165,6 +171,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {

c, err := NewWriteClient(name, &ClientConfig{
URL: rwConf.URL,
WriteProtoMsg: rwConf.ProtobufMessage,
Timeout: rwConf.RemoteTimeout,
HTTPClientConfig: rwConf.HTTPClientConfig,
SigV4Config: rwConf.SigV4Config,

@@ -207,6 +214,7 @@ func (rws *WriteStorage) ApplyConfig(conf *config.Config) error {
rws.scraper,
rwConf.SendExemplars,
rwConf.SendNativeHistograms,
rwConf.ProtobufMessage,
)
// Keep track of which queues are new so we know which to start.
newHashes = append(newHashes, hash)
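The ApplyConfig hunk above ties remote write 2.0 to the metadata-wal-records feature flag: a v2 queue is rejected unless the WriteStorage was constructed with metadata-in-WAL enabled. A hedged sketch of how that surfaces to a caller; the helper name, dir argument and URL scheme are illustrative, while the constructor and config fields are the ones shown in this diff:

// applyV2WithoutMetadataWAL sketches applying a v2 remote-write config to a
// WriteStorage created with metadataInWal=false; it returns the error introduced above.
func applyV2WithoutMetadataWAL(dir string) error {
	s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false) // metadataInWal=false
	// With metadataInWal=true (i.e. --enable-feature=metadata-wal-records) this succeeds instead.
	return s.ApplyConfig(&config.Config{
		GlobalConfig: config.DefaultGlobalConfig,
		RemoteWriteConfigs: []*config.RemoteWriteConfig{{
			URL:             &common_config.URL{URL: &url.URL{Scheme: "http", Host: "localhost"}}, // illustrative endpoint
			QueueConfig:     config.DefaultQueueConfig,
			ProtobufMessage: config.RemoteWriteProtoMsgV2,
		}},
	})
}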
@@ -17,19 +17,24 @@ import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"

"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"

"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage"
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
)

@@ -39,17 +44,23 @@ type writeHandler struct {
appendable storage.Appendable

samplesWithInvalidLabelsTotal prometheus.Counter

acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
}

const maxAheadTime = 10 * time.Minute

// NewWriteHandler creates a http.Handler that accepts remote write requests and
// writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable) http.Handler {
// NewWriteHandler creates a http.Handler that accepts remote write requests with
// the given message in acceptedProtoMsgs and writes them to the provided appendable.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
for _, acc := range acceptedProtoMsgs {
protoMsgs[acc] = struct{}{}
}
h := &writeHandler{
logger: logger,
appendable: appendable,

logger: logger,
appendable: appendable,
acceptedProtoMsgs: protoMsgs,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
@@ -63,15 +74,107 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
return h
}

func (h *writeHandler) parseProtoMsg(contentType string) (config.RemoteWriteProtoMsg, error) {
contentType = strings.TrimSpace(contentType)

parts := strings.Split(contentType, ";")
if parts[0] != appProtoContentType {
return "", fmt.Errorf("expected %v as the first (media) part, got %v content-type", appProtoContentType, contentType)
}
// Parse potential https://www.rfc-editor.org/rfc/rfc9110#parameter
for _, p := range parts[1:] {
pair := strings.Split(p, "=")
if len(pair) != 2 {
return "", fmt.Errorf("as per https://www.rfc-editor.org/rfc/rfc9110#parameter expected parameters to be key-values, got %v in %v content-type", p, contentType)
}
if pair[0] == "proto" {
ret := config.RemoteWriteProtoMsg(pair[1])
if err := ret.Validate(); err != nil {
return "", fmt.Errorf("got %v content type; %w", contentType, err)
}
return ret, nil
}
}
// No "proto=" parameter, assuming v1.
return config.RemoteWriteProtoMsgV1, nil
}

func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
req, err := DecodeWriteRequest(r.Body)
contentType := r.Header.Get("Content-Type")
if contentType == "" {
// Don't break yolo 1.0 clients if not needed. This is similar to what we did
// before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
// We could give http.StatusUnsupportedMediaType, but let's assume 1.0 message by default.
contentType = appProtoContentType
}

msg, err := h.parseProtoMsg(contentType)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
return
}

if _, ok := h.acceptedProtoMsgs[msg]; !ok {
err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) {
for k := range h.acceptedProtoMsgs {
ret = append(ret, string(k))
}
return ret
}())
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
}

enc := r.Header.Get("Content-Encoding")
if enc == "" {
// Don't break yolo 1.0 clients if not needed. This is similar to what we did
// before 2.0: https://github.com/prometheus/prometheus/blob/d78253319daa62c8f28ed47e40bafcad2dd8b586/storage/remote/write_handler.go#L62
// We could give http.StatusUnsupportedMediaType, but let's assume snappy by default.
} else if enc != string(SnappyBlockCompression) {
err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression)
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
}

// Read the request body.
body, err := io.ReadAll(r.Body)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}

err = h.write(r.Context(), req)
decompressed, err := snappy.Decode(nil, body)
if err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}

// Now we have a decompressed buffer we can unmarshal it.
switch msg {
case config.RemoteWriteProtoMsgV1:
var req prompb.WriteRequest
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = h.write(r.Context(), &req)
case config.RemoteWriteProtoMsgV2:
var req writev2.Request
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = h.writeV2(r.Context(), &req)
}

switch {
case err == nil:
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
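The negotiation above selects the protobuf message from the Content-Type proto parameter and only accepts snappy block compression. A sketch of the headers a 2.0 sender would set, written against the identifiers used in this package (appProtoContentType, SnappyBlockCompression and the version-header constants); the endpoint URL and the payload variable are assumptions, not part of this change:

// payload is assumed to be a snappy-compressed, marshalled writev2.Request.
req, _ := http.NewRequest(http.MethodPost, "http://localhost:9090/api/v1/write", bytes.NewReader(payload))
req.Header.Set("Content-Type", appProtoContentType+";proto="+string(config.RemoteWriteProtoMsgV2))
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
// Omitting the proto parameter (or the whole Content-Type header) makes the handler assume the 1.0 message.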
@@ -123,62 +226,27 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
}()

b := labels.NewScratchBuilder(0)
var exemplarErr error

for _, ts := range req.Timeseries {
labels := LabelProtosToLabels(&b, ts.Labels)
if !labels.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", labels.String())
ls := ts.ToLabels(&b, nil)
if !ls.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
samplesWithInvalidLabels++
continue
}
var ref storage.SeriesRef
for _, s := range ts.Samples {
ref, err = timeLimitApp.Append(ref, labels, s.Timestamp, s.Value)
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}

err := h.appendSamples(timeLimitApp, ts.Samples, ls)
if err != nil {
return err
}

for _, ep := range ts.Exemplars {
e := exemplarProtoToExemplar(&b, ep)

_, exemplarErr = timeLimitApp.AppendExemplar(0, labels, e)
exemplarErr = h.checkAppendExemplarError(exemplarErr, e, &outOfOrderExemplarErrs)
if exemplarErr != nil {
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr)
}
e := ep.ToExemplar(&b, nil)
h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
}

for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fhs := FloatHistogramProtoToFloatHistogram(hp)
_, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, nil, fhs)
} else {
hs := HistogramProtoToHistogram(hp)
_, err = timeLimitApp.AppendHistogram(0, labels, hp.Timestamp, hs, nil)
}

if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
err = h.appendHistograms(timeLimitApp, ts.Histograms, ls)
if err != nil {
return err
}
}

@@ -192,6 +260,149 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
return nil
}

func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) {
outOfOrderExemplarErrs := 0

timeLimitApp := &timeLimitAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}

defer func() {
if err != nil {
_ = timeLimitApp.Rollback()
return
}
err = timeLimitApp.Commit()
}()

b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, req.Symbols)

err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls)
if err != nil {
return err
}

for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, req.Symbols)
h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
}

err = h.appendHistogramsV2(timeLimitApp, ts.Histograms, ls)
if err != nil {
return err
}

m := ts.ToMetadata(req.Symbols)
if _, err = timeLimitApp.UpdateMetadata(0, ls, m); err != nil {
level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
}
}

if outOfOrderExemplarErrs > 0 {
_ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
}

return nil
}

func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) {
_, err := app.AppendExemplar(0, labels, e)
err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs)
if err != nil {
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err)
}
}

func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}

func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}

func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}

func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}

// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable) http.Handler {
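With the new signature, callers pass the set of accepted protobuf messages explicitly. An illustrative wiring sketch, assuming the appendable, registry and mux names, which are placeholders rather than part of this change:

// Register the updated handler, accepting both message versions on the standard receiver path.
handler := NewWriteHandler(log.NewNopLogger(), prometheus.NewRegistry(), appendable,
	[]config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2})
mux := http.NewServeMux()
mux.Handle("/api/v1/write", handler)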
@ -30,25 +30,230 @@ import (
|
|||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/prompb"
|
||||
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
)
|
||||
|
||||
func TestRemoteWriteHandler(t *testing.T) {
|
||||
buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
|
||||
func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) {
|
||||
payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
reqHeaders map[string]string
|
||||
expectedCode int
|
||||
}{
|
||||
// Generally Prometheus 1.0 Receiver never checked for existence of the headers, so
|
||||
// we keep things permissive.
|
||||
{
|
||||
name: "correct PRW 1.0 headers",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusNoContent,
|
||||
},
|
||||
{
|
||||
name: "missing remote write version",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
},
|
||||
expectedCode: http.StatusNoContent,
|
||||
},
|
||||
{
|
||||
name: "no headers",
|
||||
reqHeaders: map[string]string{},
|
||||
expectedCode: http.StatusNoContent,
|
||||
},
|
||||
{
|
||||
name: "missing content-type",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusNoContent,
|
||||
},
|
||||
{
|
||||
name: "missing content-encoding",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusNoContent,
|
||||
},
|
||||
{
|
||||
name: "wrong content-type",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": "yolo",
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "wrong content-type2",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": appProtoContentType + ";proto=yolo",
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "not supported content-encoding",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV1],
|
||||
"Content-Encoding": "zstd",
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
for k, v := range tc.reqHeaders {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
out, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
_ = resp.Body.Close()
|
||||
require.Equal(t, tc.expectedCode, resp.StatusCode, string(out))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
|
||||
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
reqHeaders map[string]string
|
||||
expectedCode int
|
||||
}{
|
||||
{
|
||||
name: "correct PRW 2.0 headers",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusNoContent,
|
||||
},
|
||||
{
|
||||
name: "missing remote write version",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
},
|
||||
expectedCode: http.StatusNoContent, // We don't check for now.
|
||||
},
|
||||
{
|
||||
name: "no headers",
|
||||
reqHeaders: map[string]string{},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "missing content-type",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
// This only gives 415, because we explicitly only support 2.0. If we supported both
|
||||
// (default) it would be empty message parsed and ok response.
|
||||
// This is perhaps better, than 415 for previously working 1.0 flow with
|
||||
// no content-type.
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "missing content-encoding",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusNoContent, // Similar to 1.0 impl, we default to Snappy, so it works.
|
||||
},
|
||||
{
|
||||
name: "wrong content-type",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": "yolo",
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "wrong content-type2",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": appProtoContentType + ";proto=yolo",
|
||||
"Content-Encoding": string(SnappyBlockCompression),
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
{
|
||||
name: "not supported content-encoding",
|
||||
reqHeaders: map[string]string{
|
||||
"Content-Type": remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2],
|
||||
"Content-Encoding": "zstd",
|
||||
RemoteWriteVersionHeader: RemoteWriteVersion20HeaderValue,
|
||||
},
|
||||
expectedCode: http.StatusUnsupportedMediaType,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
for k, v := range tc.reqHeaders {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
out, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
_ = resp.Body.Close()
|
||||
require.Equal(t, tc.expectedCode, resp.StatusCode, string(out))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteWriteHandler_V1Message(t *testing.T) {
|
||||
payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
// NOTE: Strictly speaking, even for 1.0 we require headers, but we never verified those
|
||||
// in Prometheus, so keeping like this to not break existing 1.0 clients.
|
||||
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewWriteHandler(nil, nil, appendable)
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
@ -61,24 +266,22 @@ func TestRemoteWriteHandler(t *testing.T) {
|
|||
j := 0
|
||||
k := 0
|
||||
for _, ts := range writeRequestFixture.Timeseries {
|
||||
labels := LabelProtosToLabels(&b, ts.Labels)
|
||||
labels := ts.ToLabels(&b, nil)
|
||||
for _, s := range ts.Samples {
|
||||
requireEqual(t, mockSample{labels, s.Timestamp, s.Value}, appendable.samples[i])
|
||||
i++
|
||||
}
|
||||
|
||||
for _, e := range ts.Exemplars {
|
||||
exemplarLabels := LabelProtosToLabels(&b, e.Labels)
|
||||
exemplarLabels := e.ToExemplar(&b, nil).Labels
|
||||
requireEqual(t, mockExemplar{labels, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
|
||||
j++
|
||||
}
|
||||
|
||||
for _, hp := range ts.Histograms {
|
||||
if hp.IsFloatHistogram() {
|
||||
fh := FloatHistogramProtoToFloatHistogram(hp)
|
||||
fh := hp.ToFloatHistogram()
|
||||
requireEqual(t, mockHistogram{labels, hp.Timestamp, nil, fh}, appendable.histograms[k])
|
||||
} else {
|
||||
h := HistogramProtoToHistogram(hp)
|
||||
h := hp.ToIntHistogram()
|
||||
requireEqual(t, mockHistogram{labels, hp.Timestamp, h, nil}, appendable.histograms[k])
|
||||
}
|
||||
|
||||
|
@ -87,8 +290,66 @@ func TestRemoteWriteHandler(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestOutOfOrderSample(t *testing.T) {
|
||||
tests := []struct {
|
||||
func TestRemoteWriteHandler_V2Message(t *testing.T) {
|
||||
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
|
||||
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
|
||||
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
require.Equal(t, http.StatusNoContent, resp.StatusCode)
|
||||
|
||||
b := labels.NewScratchBuilder(0)
|
||||
i := 0
|
||||
j := 0
|
||||
k := 0
|
||||
for _, ts := range writeV2RequestFixture.Timeseries {
|
||||
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
|
||||
|
||||
for _, s := range ts.Samples {
|
||||
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
|
||||
|
||||
switch i {
|
||||
case 0:
|
||||
requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i])
|
||||
case 1:
|
||||
requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i])
|
||||
default:
|
||||
t.Fatal("more series/samples than expected")
|
||||
}
|
||||
i++
|
||||
}
|
||||
for _, e := range ts.Exemplars {
|
||||
exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
|
||||
requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
|
||||
j++
|
||||
}
|
||||
for _, hp := range ts.Histograms {
|
||||
if hp.IsFloatHistogram() {
|
||||
fh := hp.ToFloatHistogram()
|
||||
requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
|
||||
} else {
|
||||
h := hp.ToIntHistogram()
|
||||
requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
|
||||
}
|
||||
k++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutOfOrderSample_V1Message(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
Name string
|
||||
Timestamp int64
|
||||
}{
|
||||
|
@ -100,23 +361,59 @@ func TestOutOfOrderSample(t *testing.T) {
|
|||
Name: "future",
|
||||
Timestamp: math.MaxInt64,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
} {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
|
||||
Samples: []prompb.Sample{{Value: 1, Timestamp: tc.Timestamp}},
|
||||
}}, nil, nil, nil, nil)
|
||||
}}, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
appendable := &mockAppendable{
|
||||
latestSample: 100,
|
||||
}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
|
||||
appendable := &mockAppendable{latestSample: 100}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutOfOrderSample_V2Message(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
Name string
|
||||
Timestamp int64
|
||||
}{
|
||||
{
|
||||
Name: "historic",
|
||||
Timestamp: 0,
|
||||
},
|
||||
{
|
||||
Name: "future",
|
||||
Timestamp: math.MaxInt64,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
|
||||
LabelsRefs: []uint32{1, 2},
|
||||
Samples: []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}},
|
||||
}}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
|
||||
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
|
||||
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||
|
||||
appendable := &mockAppendable{latestSample: 100}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
@ -128,9 +425,9 @@ func TestOutOfOrderSample(t *testing.T) {
|
|||
}
|
||||
|
||||
// This test case currently aims to verify that the WriteHandler endpoint
|
||||
// don't fail on ingestion errors since the exemplar storage is
|
||||
// don't fail on exemplar ingestion errors since the exemplar storage is
|
||||
// still experimental.
|
||||
func TestOutOfOrderExemplar(t *testing.T) {
|
||||
func TestOutOfOrderExemplar_V1Message(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
Timestamp int64
|
||||
|
@ -147,19 +444,17 @@ func TestOutOfOrderExemplar(t *testing.T) {
|
|||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
|
||||
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "foo", Value: "bar"}}, Value: 1, Timestamp: tc.Timestamp}},
|
||||
}}, nil, nil, nil, nil)
|
||||
}}, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
appendable := &mockAppendable{
|
||||
latestExemplar: 100,
|
||||
}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
|
||||
appendable := &mockAppendable{latestExemplar: 100}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
@ -171,7 +466,7 @@ func TestOutOfOrderExemplar(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestOutOfOrderHistogram(t *testing.T) {
|
||||
func TestOutOfOrderExemplar_V2Message(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
Timestamp int64
|
||||
|
@ -188,19 +483,58 @@ func TestOutOfOrderHistogram(t *testing.T) {
|
|||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
|
||||
LabelsRefs: []uint32{1, 2},
|
||||
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}},
|
||||
}}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
|
||||
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
|
||||
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||
|
||||
appendable := &mockAppendable{latestExemplar: 100}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
|
||||
require.Equal(t, http.StatusNoContent, resp.StatusCode)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutOfOrderHistogram_V1Message(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
Name string
|
||||
Timestamp int64
|
||||
}{
|
||||
{
|
||||
Name: "historic",
|
||||
Timestamp: 0,
|
||||
},
|
||||
{
|
||||
Name: "future",
|
||||
Timestamp: math.MaxInt64,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
payload, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
Labels: []prompb.Label{{Name: "__name__", Value: "test_metric"}},
|
||||
Histograms: []prompb.Histogram{HistogramToHistogramProto(tc.Timestamp, &testHistogram), FloatHistogramToHistogramProto(1, testHistogram.ToFloat(nil))},
|
||||
}}, nil, nil, nil, nil)
|
||||
Histograms: []prompb.Histogram{prompb.FromIntHistogram(tc.Timestamp, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
|
||||
}}, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
appendable := &mockAppendable{
|
||||
latestHistogram: 100,
|
||||
}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
|
||||
appendable := &mockAppendable{latestHistogram: 100}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
@ -211,9 +545,49 @@ func TestOutOfOrderHistogram(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkRemoteWritehandler(b *testing.B) {
|
||||
func TestOutOfOrderHistogram_V2Message(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
Name string
|
||||
Timestamp int64
|
||||
}{
|
||||
{
|
||||
Name: "historic",
|
||||
Timestamp: 0,
|
||||
},
|
||||
{
|
||||
Name: "future",
|
||||
Timestamp: math.MaxInt64,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
|
||||
LabelsRefs: []uint32{0, 1},
|
||||
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
|
||||
}}, []string{"__name__", "metric1"}, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
|
||||
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
|
||||
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||
|
||||
appendable := &mockAppendable{latestHistogram: 100}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRemoteWriteHandler(b *testing.B) {
|
||||
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
|
||||
reqs := []*http.Request{}
|
||||
var reqs []*http.Request
|
||||
for i := 0; i < b.N; i++ {
|
||||
num := strings.Repeat(strconv.Itoa(i), 16)
|
||||
buf, _, _, err := buildWriteRequest(nil, []prompb.TimeSeries{{
|
||||
|
@ -221,8 +595,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
|
|||
{Name: "__name__", Value: "test_metric"},
|
||||
{Name: "test_label_name_" + num, Value: labelValue + num},
|
||||
},
|
||||
Histograms: []prompb.Histogram{HistogramToHistogramProto(0, &testHistogram)},
|
||||
}}, nil, nil, nil, nil)
|
||||
Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram)},
|
||||
}}, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(b, err)
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
require.NoError(b, err)
|
||||
|
@ -230,7 +604,8 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
|
|||
}
|
||||
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
|
||||
// TODO: test with other proto format(s)
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
b.ResetTimer()
|
||||
|
@ -239,17 +614,39 @@ func BenchmarkRemoteWritehandler(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCommitErr(t *testing.T) {
|
||||
buf, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil)
|
||||
func TestCommitErr_V1Message(t *testing.T) {
|
||||
payload, _, _, err := buildWriteRequest(nil, writeRequestFixture.Timeseries, nil, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
appendable := &mockAppendable{
|
||||
commitErr: fmt.Errorf("commit error"),
|
||||
}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable)
|
||||
appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
||||
resp := recorder.Result()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
|
||||
require.Equal(t, "commit error\n", string(body))
|
||||
}
|
||||
|
||||
func TestCommitErr_V2Message(t *testing.T) {
|
||||
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), writeV2RequestFixture.Timeseries, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(payload))
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
|
||||
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
|
||||
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
|
||||
|
||||
appendable := &mockAppendable{commitErr: fmt.Errorf("commit error")}
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
@ -275,10 +672,10 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
|
|||
b.Cleanup(func() {
|
||||
require.NoError(b, db.Close())
|
||||
})
|
||||
// TODO: test with other proto format(s)
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
|
||||
|
||||
handler := NewWriteHandler(log.NewNopLogger(), nil, db.Head())
|
||||
|
||||
buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil)
|
||||
buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
|
||||
require.NoError(b, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(buf))
|
||||
|
@ -291,7 +688,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
|
|||
|
||||
var bufRequests [][]byte
|
||||
for i := 0; i < 100; i++ {
|
||||
buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil)
|
||||
buf, _, _, err = buildWriteRequest(nil, genSeriesWithSample(1000, int64(80+i)*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
|
||||
require.NoError(b, err)
|
||||
bufRequests = append(bufRequests, buf)
|
||||
}
|
||||
|
@ -328,7 +725,9 @@ type mockAppendable struct {
|
|||
exemplars []mockExemplar
|
||||
latestHistogram int64
|
||||
histograms []mockHistogram
|
||||
commitErr error
|
||||
metadata []mockMetadata
|
||||
|
||||
commitErr error
|
||||
}
|
||||
|
||||
type mockSample struct {
|
||||
|
@ -351,10 +750,17 @@ type mockHistogram struct {
|
|||
fh *histogram.FloatHistogram
|
||||
}
|
||||
|
||||
type mockMetadata struct {
|
||||
l labels.Labels
|
||||
m metadata.Metadata
|
||||
}
|
||||
|
||||
// Wrapper to instruct go-cmp package to compare a list of structs with unexported fields.
|
||||
func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
|
||||
t.Helper()
|
||||
|
||||
testutil.RequireEqualWithOptions(t, expected, actual,
|
||||
[]cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{})},
|
||||
[]cmp.Option{cmp.AllowUnexported(mockSample{}), cmp.AllowUnexported(mockExemplar{}), cmp.AllowUnexported(mockHistogram{}), cmp.AllowUnexported(mockMetadata{})},
|
||||
msgAndArgs...)
|
||||
}
|
||||
|
||||
|
@ -400,13 +806,14 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
|
|||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
|
||||
// TODO: Wire metadata in a mockAppendable field when we get around to handling metadata in remote_write.
|
||||
// UpdateMetadata is no-op for remote write (where mockAppendable is being used to test) for now.
|
||||
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
|
||||
m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
|
||||
// AppendCTZeroSample is no-op for remote-write for now.
|
||||
// TODO(bwplotka): Add support for PRW 2.0 for CT zero feature (but also we might
|
||||
// replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218).
|
||||
return 0, nil
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ package remote
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
|
@ -43,11 +44,12 @@ func testRemoteWriteConfig() *config.RemoteWriteConfig {
|
|||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoDuplicateWriteConfigs(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_NoDuplicateWriteConfigs(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
cfg1 := config.RemoteWriteConfig{
|
||||
|
@ -58,7 +60,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
|
|||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
cfg2 := config.RemoteWriteConfig{
|
||||
Name: "write-2",
|
||||
|
@ -68,7 +71,8 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
|
|||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
cfg3 := config.RemoteWriteConfig{
|
||||
URL: &common_config.URL{
|
||||
|
@ -77,61 +81,49 @@ func TestNoDuplicateWriteConfigs(t *testing.T) {
|
|||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
|
||||
type testcase struct {
|
||||
cfgs []*config.RemoteWriteConfig
|
||||
err bool
|
||||
}
|
||||
|
||||
cases := []testcase{
|
||||
for _, tc := range []struct {
|
||||
cfgs []*config.RemoteWriteConfig
|
||||
expectedErr error
|
||||
}{
|
||||
{ // Two duplicates, we should get an error.
|
||||
cfgs: []*config.RemoteWriteConfig{
|
||||
&cfg1,
|
||||
&cfg1,
|
||||
},
|
||||
err: true,
|
||||
cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg1},
|
||||
expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"),
|
||||
},
|
||||
{ // Duplicates but with different names, we should not get an error.
|
||||
cfgs: []*config.RemoteWriteConfig{
|
||||
&cfg1,
|
||||
&cfg2,
|
||||
},
|
||||
err: false,
|
||||
cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg2},
|
||||
},
|
||||
{ // Duplicates but one with no name, we should not get an error.
|
||||
cfgs: []*config.RemoteWriteConfig{
|
||||
&cfg1,
|
||||
&cfg3,
|
||||
},
|
||||
err: false,
|
||||
cfgs: []*config.RemoteWriteConfig{&cfg1, &cfg3},
|
||||
},
|
||||
{ // Duplicates both with no name, we should get an error.
|
||||
cfgs: []*config.RemoteWriteConfig{
|
||||
&cfg3,
|
||||
&cfg3,
|
||||
},
|
||||
err: true,
|
||||
cfgs: []*config.RemoteWriteConfig{&cfg3, &cfg3},
|
||||
expectedErr: errors.New("duplicate remote write configs are not allowed, found duplicate for URL: http://localhost"),
|
||||
},
|
||||
}
|
||||
} {
|
||||
t.Run("", func(t *testing.T) {
|
||||
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
RemoteWriteConfigs: tc.cfgs,
|
||||
}
|
||||
err := s.ApplyConfig(conf)
|
||||
if tc.expectedErr == nil {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
require.Equal(t, tc.expectedErr, err)
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil)
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
RemoteWriteConfigs: tc.cfgs,
|
||||
}
|
||||
err := s.ApplyConfig(conf)
|
||||
gotError := err != nil
|
||||
require.Equal(t, tc.err, gotError)
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRestartOnNameChange(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_RestartOnNameChange(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
cfg := testRemoteWriteConfig()
|
||||
|
@ -139,13 +131,11 @@ func TestRestartOnNameChange(t *testing.T) {
|
|||
hash, err := toHash(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil)
|
||||
s := NewWriteStorage(nil, nil, dir, time.Millisecond, nil, false)
|
||||
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
RemoteWriteConfigs: []*config.RemoteWriteConfig{
|
||||
cfg,
|
||||
},
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
RemoteWriteConfigs: []*config.RemoteWriteConfig{cfg},
|
||||
}
|
||||
require.NoError(t, s.ApplyConfig(conf))
|
||||
require.Equal(t, s.queues[hash].client().Name(), cfg.Name)
|
||||
|
@ -157,14 +147,13 @@ func TestRestartOnNameChange(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, s.queues[hash].client().Name(), conf.RemoteWriteConfigs[0].Name)
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
}
|
||||
|
||||
func TestUpdateWithRegisterer(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_UpdateWithRegisterer(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil)
|
||||
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Millisecond, nil, false)
|
||||
c1 := &config.RemoteWriteConfig{
|
||||
Name: "named",
|
||||
URL: &common_config.URL{
|
||||
|
@ -173,7 +162,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
|
|||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
c2 := &config.RemoteWriteConfig{
|
||||
URL: &common_config.URL{
|
||||
|
@ -182,7 +172,8 @@ func TestUpdateWithRegisterer(t *testing.T) {
|
|||
Host: "localhost",
|
||||
},
|
||||
},
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
|
@ -197,14 +188,13 @@ func TestUpdateWithRegisterer(t *testing.T) {
|
|||
require.Equal(t, 10, queue.cfg.MaxShards)
|
||||
}
|
||||
|
||||
err := s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
}
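Both fixtures in this test now set ProtobufMessage explicitly. A small sketch of such a config literal using the fields visible in the hunks above; fields not shown keep their zero values, and the snippet assumes the usual common_config import alias:

package main

import (
	"fmt"
	"net/url"

	common_config "github.com/prometheus/common/config"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// Mirrors the test fixtures above; everything else stays at its zero value.
	c := &config.RemoteWriteConfig{
		Name: "named",
		URL: &common_config.URL{URL: &url.URL{
			Scheme: "http",
			Host:   "localhost",
		}},
		QueueConfig:     config.DefaultQueueConfig,
		ProtobufMessage: config.RemoteWriteProtoMsgV1,
	}
	fmt.Println(c.ProtobufMessage)
}

Switching the last field to config.RemoteWriteProtoMsgV2 selects the newer request format instead.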
|
||||
|
||||
func TestWriteStorageLifecycle(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_Lifecycle(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
|
||||
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.DefaultGlobalConfig,
|
||||
RemoteWriteConfigs: []*config.RemoteWriteConfig{
|
||||
|
@ -214,14 +204,13 @@ func TestWriteStorageLifecycle(t *testing.T) {
|
|||
require.NoError(t, s.ApplyConfig(conf))
|
||||
require.Len(t, s.queues, 1)
|
||||
|
||||
err := s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
}
|
||||
|
||||
func TestUpdateExternalLabels(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_UpdateExternalLabels(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil)
|
||||
s := NewWriteStorage(nil, prometheus.NewRegistry(), dir, time.Second, nil, false)
|
||||
|
||||
externalLabels := labels.FromStrings("external", "true")
|
||||
conf := &config.Config{
|
||||
|
@ -243,15 +232,13 @@ func TestUpdateExternalLabels(t *testing.T) {
|
|||
require.Len(t, s.queues, 1)
|
||||
require.Equal(t, []labels.Label{{Name: "external", Value: "true"}}, s.queues[hash].externalLabels)
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
}
|
||||
|
||||
func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_Idempotent(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
|
||||
|
||||
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
|
||||
conf := &config.Config{
|
||||
GlobalConfig: config.GlobalConfig{},
|
||||
RemoteWriteConfigs: []*config.RemoteWriteConfig{
|
||||
|
@ -269,14 +256,13 @@ func TestWriteStorageApplyConfigsIdempotent(t *testing.T) {
|
|||
_, hashExists := s.queues[hash]
|
||||
require.True(t, hashExists, "Queue pointer should have remained the same")
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
}
|
||||
|
||||
func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
|
||||
func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil)
|
||||
s := NewWriteStorage(nil, nil, dir, defaultFlushDeadline, nil, false)
|
||||
|
||||
c0 := &config.RemoteWriteConfig{
|
||||
RemoteTimeout: model.Duration(10 * time.Second),
|
||||
|
@ -286,6 +272,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
|
|||
Regex: relabel.MustNewRegexp(".+"),
|
||||
},
|
||||
},
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
c1 := &config.RemoteWriteConfig{
|
||||
RemoteTimeout: model.Duration(20 * time.Second),
|
||||
|
@ -293,10 +280,12 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
|
|||
HTTPClientConfig: common_config.HTTPClientConfig{
|
||||
BearerToken: "foo",
|
||||
},
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
c2 := &config.RemoteWriteConfig{
|
||||
RemoteTimeout: model.Duration(30 * time.Second),
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
RemoteTimeout: model.Duration(30 * time.Second),
|
||||
QueueConfig: config.DefaultQueueConfig,
|
||||
ProtobufMessage: config.RemoteWriteProtoMsgV1,
|
||||
}
|
||||
|
||||
conf := &config.Config{
|
||||
|
@ -376,8 +365,7 @@ func TestWriteStorageApplyConfigsPartialUpdate(t *testing.T) {
|
|||
_, hashExists = s.queues[hashes[2]]
|
||||
require.True(t, hashExists, "Pointer of unchanged queue should have remained the same")
|
||||
|
||||
err = s.Close()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.Close())
|
||||
}
|
||||
|
||||
func TestOTLPWriteHandler(t *testing.T) {
|
||||
|
|
|
@ -89,7 +89,7 @@ func createTestAgentDB(t testing.TB, reg prometheus.Registerer, opts *Options) *
|
|||
t.Helper()
|
||||
|
||||
dbDir := t.TempDir()
|
||||
rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil)
|
||||
rs := remote.NewStorage(log.NewNopLogger(), reg, startTime, dbDir, time.Second*30, nil, false)
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, rs.Close())
|
||||
})
|
||||
|
@ -585,7 +585,7 @@ func TestLockfile(t *testing.T) {
|
|||
tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
|
||||
logger := log.NewNopLogger()
|
||||
reg := prometheus.NewRegistry()
|
||||
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil)
|
||||
rs := remote.NewStorage(logger, reg, startTime, data, time.Second*30, nil, false)
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, rs.Close())
|
||||
})
|
||||
|
@ -605,7 +605,7 @@ func TestLockfile(t *testing.T) {
|
|||
|
||||
func Test_ExistingWAL_NextRef(t *testing.T) {
|
||||
dbDir := t.TempDir()
|
||||
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil)
|
||||
rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, dbDir, time.Second*30, nil, false)
|
||||
defer func() {
|
||||
require.NoError(t, rs.Close())
|
||||
}()
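All three call sites in this file gain the same trailing boolean on remote.NewStorage. A minimal sketch of constructing the remote storage outside a test, assuming the seven-argument signature shown in these hunks; the directory path is hypothetical and the tests above simply pass false:

package main

import (
	"time"

	"github.com/go-kit/log"
	"github.com/prometheus/prometheus/storage/remote"
)

func main() {
	startTime := func() (int64, error) { return 0, nil }
	walDir := "/tmp/agent-demo" // hypothetical; the tests above use t.TempDir()

	// The trailing boolean is the argument added in these hunks; the tests
	// keep it false, callers that want metadata handling enabled pass true.
	rs := remote.NewStorage(log.NewNopLogger(), nil, startTime, walDir, 30*time.Second, nil, false)
	defer func() { _ = rs.Close() }()
}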
|
||||
|
|
|
@ -133,15 +133,6 @@ type Meta struct {
|
|||
// Time range the data covers.
|
||||
// When MaxTime == math.MaxInt64 the chunk is still open and being appended to.
|
||||
MinTime, MaxTime int64
|
||||
|
||||
// OOOLastRef, OOOLastMinTime and OOOLastMaxTime are kept as markers for
|
||||
// overlapping chunks.
|
||||
// These fields point to the last created out of order Chunk (the head) that existed
|
||||
// when Series() was called and was overlapping.
|
||||
// Series() and Chunk() method responses should be consistent for the same
|
||||
// query even if new data is added in between the calls.
|
||||
OOOLastRef ChunkRef
|
||||
OOOLastMinTime, OOOLastMaxTime int64
|
||||
}
|
||||
|
||||
// ChunkFromSamples requires all samples to have the same type.
|
||||
|
|
|
@ -1407,6 +1407,9 @@ func (db *DB) compactHead(head *RangeHead) error {
|
|||
if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil {
|
||||
return fmt.Errorf("head memory truncate: %w", err)
|
||||
}
|
||||
|
||||
db.head.RebuildSymbolTable(db.logger)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
252
tsdb/db_test.go
|
@ -4495,6 +4495,14 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
|
|||
//
|
||||
// are not included in this compaction.
|
||||
func TestOOOCompaction(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOCompaction(t, scenario)
|
||||
})
|
||||
}
|
||||
}
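From here on, most OOO tests are converted to the same shape: a thin wrapper that re-runs the original body once per entry in sampleTypeScenarios, and a helper (such as testOOOCompaction below) that appends and asserts through scenario.appendFunc and scenario.sampleFunc. The scenario type itself is not part of this diff; the following sketch only illustrates the pattern with hypothetical names.

package main

import "fmt"

// Hypothetical stand-ins: the diff only shows how scenarios are consumed
// (appendFunc, sampleFunc, sampleType), not how they are defined.
type sample struct {
	t int64
	f float64
}

type scenario struct {
	sampleType string
	// sampleFunc builds the expected sample for a (timestamp, value) pair.
	sampleFunc func(ts, value int64) sample
}

var sampleTypeScenarios = map[string]scenario{
	"float": {
		sampleType: "float",
		sampleFunc: func(ts, value int64) sample { return sample{t: ts, f: float64(value)} },
	},
	// Histogram and float-histogram scenarios would plug in here in the real suite.
}

func main() {
	for name, sc := range sampleTypeScenarios {
		fmt.Println(name, sc.sampleFunc(60_000, 2)) // same body, parameterised by sample type
	}
}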
|
||||
|
||||
func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -4516,9 +4524,9 @@ func TestOOOCompaction(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, _, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
_, err = app.Append(0, series2, ts, float64(2*ts))
|
||||
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -4551,8 +4559,8 @@ func TestOOOCompaction(t *testing.T) {
|
|||
fromMins, toMins := r[0], r[1]
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
|
||||
series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
|
||||
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
|
||||
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
|
||||
}
|
||||
}
|
||||
expRes := map[string][]chunks.Sample{
|
||||
|
@ -4564,7 +4572,7 @@ func TestOOOCompaction(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
verifyDBSamples() // Before any compaction.
|
||||
|
@ -4619,8 +4627,8 @@ func TestOOOCompaction(t *testing.T) {
|
|||
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
|
||||
series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
|
||||
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
|
||||
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
|
||||
}
|
||||
expRes := map[string][]chunks.Sample{
|
||||
series1.String(): series1Samples,
|
||||
|
@ -4631,7 +4639,7 @@ func TestOOOCompaction(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
// Checking for expected data in the blocks.
|
||||
|
@ -4675,6 +4683,14 @@ func TestOOOCompaction(t *testing.T) {
|
|||
// TestOOOCompactionWithNormalCompaction tests if OOO compaction is performed
|
||||
// when the normal head's compaction is done.
|
||||
func TestOOOCompactionWithNormalCompaction(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOCompactionWithNormalCompaction(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -4696,9 +4712,9 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, _, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
_, err = app.Append(0, series2, ts, float64(2*ts))
|
||||
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -4751,8 +4767,8 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
|
|||
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
|
||||
series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
|
||||
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
|
||||
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
|
||||
}
|
||||
expRes := map[string][]chunks.Sample{
|
||||
series1.String(): series1Samples,
|
||||
|
@ -4763,7 +4779,7 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
// Checking for expected data in the blocks.
|
||||
|
@ -4775,6 +4791,14 @@ func TestOOOCompactionWithNormalCompaction(t *testing.T) {
|
|||
// configured to not have wal and wbl but its able to compact both the in-order
|
||||
// and out-of-order head.
|
||||
func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOCompactionWithDisabledWriteLog(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -4797,9 +4821,9 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, _, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
_, err = app.Append(0, series2, ts, float64(2*ts))
|
||||
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -4852,8 +4876,8 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
|
|||
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
|
||||
series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
|
||||
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
|
||||
series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
|
||||
}
|
||||
expRes := map[string][]chunks.Sample{
|
||||
series1.String(): series1Samples,
|
||||
|
@ -4864,7 +4888,7 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
// Checking for expected data in the blocks.
|
||||
|
@ -4876,6 +4900,14 @@ func TestOOOCompactionWithDisabledWriteLog(t *testing.T) {
|
|||
// missing after a restart while snapshot was enabled, but the query still returns the right
|
||||
// data from the mmap chunks.
|
||||
func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -4898,9 +4930,9 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, _, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
_, err = app.Append(0, series2, ts, float64(2*ts))
|
||||
_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -4946,8 +4978,8 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
|
|||
series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
|
||||
series2Samples = append(series2Samples, sample{ts, float64(2 * ts), nil, nil})
|
||||
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
|
||||
series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2))
|
||||
}
|
||||
expRes := map[string][]chunks.Sample{
|
||||
series1.String(): series1Samples,
|
||||
|
@ -4958,7 +4990,7 @@ func TestOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
// Checking for expected ooo data from mmap chunks.
|
||||
|
@ -5159,6 +5191,14 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOOOAppendAndQuery(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOAppendAndQuery(t, scenario)
|
||||
})
|
||||
}
|
||||
}
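The helper below opens the head with a non-zero out-of-order window. For reference, the two option fields it relies on can be set on DefaultOptions directly; this standalone snippet mirrors the values used in the tests above:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	// Accept samples up to 4h older than the newest in-order sample and cap
	// each in-memory OOO chunk at 30 samples, as in the tests above.
	opts := tsdb.DefaultOptions()
	opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
	opts.OutOfOrderCapMax = 30
	fmt.Println(opts.OutOfOrderTimeWindow, opts.OutOfOrderCapMax)
}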
|
||||
|
||||
func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) {
|
||||
opts := DefaultOptions()
|
||||
opts.OutOfOrderCapMax = 30
|
||||
opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
|
||||
|
@ -5180,13 +5220,13 @@ func TestOOOAppendAndQuery(t *testing.T) {
|
|||
key := lbls.String()
|
||||
from, to := minutes(fromMins), minutes(toMins)
|
||||
for min := from; min <= to; min += time.Minute.Milliseconds() {
|
||||
val := rand.Float64()
|
||||
_, err := app.Append(0, lbls, min, val)
|
||||
val := rand.Intn(1000)
|
||||
_, s, err := scenario.appendFunc(app, lbls, min, int64(val))
|
||||
if faceError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
appendedSamples[key] = append(appendedSamples[key], sample{t: min, f: val})
|
||||
appendedSamples[key] = append(appendedSamples[key], s)
|
||||
totalSamples++
|
||||
}
|
||||
}
|
||||
|
@ -5222,7 +5262,7 @@ func TestOOOAppendAndQuery(t *testing.T) {
|
|||
expSamples[k] = append(expSamples[k], s)
|
||||
}
|
||||
}
|
||||
require.Equal(t, expSamples, seriesSet)
|
||||
requireEqualSeries(t, expSamples, seriesSet, true)
|
||||
requireEqualOOOSamples(t, totalSamples-2, db)
|
||||
}
|
||||
|
||||
|
@ -5284,6 +5324,14 @@ func TestOOOAppendAndQuery(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOOODisabled(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOODisabled(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOODisabled(t *testing.T, scenario sampleTypeScenario) {
|
||||
opts := DefaultOptions()
|
||||
opts.OutOfOrderTimeWindow = 0
|
||||
db := openTestDB(t, opts, nil)
|
||||
|
@ -5297,19 +5345,19 @@ func TestOOODisabled(t *testing.T) {
|
|||
expSamples := make(map[string][]chunks.Sample)
|
||||
totalSamples := 0
|
||||
failedSamples := 0
|
||||
addSample := func(lbls labels.Labels, fromMins, toMins int64, faceError bool) {
|
||||
|
||||
addSample := func(db *DB, lbls labels.Labels, fromMins, toMins int64, faceError bool) {
|
||||
app := db.Appender(context.Background())
|
||||
key := lbls.String()
|
||||
from, to := minutes(fromMins), minutes(toMins)
|
||||
for min := from; min <= to; min += time.Minute.Milliseconds() {
|
||||
val := rand.Float64()
|
||||
_, err := app.Append(0, lbls, min, val)
|
||||
_, _, err := scenario.appendFunc(app, lbls, min, min)
|
||||
if faceError {
|
||||
require.Error(t, err)
|
||||
failedSamples++
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
expSamples[key] = append(expSamples[key], sample{t: min, f: val})
|
||||
expSamples[key] = append(expSamples[key], scenario.sampleFunc(min, min))
|
||||
totalSamples++
|
||||
}
|
||||
}
|
||||
|
@ -5320,21 +5368,21 @@ func TestOOODisabled(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
addSample(s1, 300, 300, false) // In-order samples.
|
||||
addSample(s1, 250, 260, true) // Some ooo samples.
|
||||
addSample(s1, 59, 59, true) // Out of time window.
|
||||
addSample(s1, 60, 65, true) // At the edge of time window, also it would be "out of bound" without the ooo support.
|
||||
addSample(s1, 59, 59, true) // Out of time window again.
|
||||
addSample(s1, 301, 310, false) // More in-order samples.
|
||||
addSample(db, s1, 300, 300, false) // In-order samples.
|
||||
addSample(db, s1, 250, 260, true) // Some ooo samples.
|
||||
addSample(db, s1, 59, 59, true) // Out of time window.
|
||||
addSample(db, s1, 60, 65, true) // At the edge of time window, also it would be "out of bound" without the ooo support.
|
||||
addSample(db, s1, 59, 59, true) // Out of time window again.
|
||||
addSample(db, s1, 301, 310, false) // More in-order samples.
|
||||
|
||||
querier, err := db.Querier(math.MinInt64, math.MaxInt64)
|
||||
require.NoError(t, err)
|
||||
|
||||
seriesSet := query(t, querier, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar."))
|
||||
require.Equal(t, expSamples, seriesSet)
|
||||
requireEqualSeries(t, expSamples, seriesSet, true)
|
||||
requireEqualOOOSamples(t, 0, db)
|
||||
require.Equal(t, float64(failedSamples),
|
||||
prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat))+prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat)),
|
||||
prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType))+prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)),
|
||||
"number of ooo/oob samples mismatch")
|
||||
|
||||
// Verifying that no OOO artifacts were generated.
|
||||
|
@ -5349,6 +5397,14 @@ func TestOOODisabled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWBLAndMmapReplay(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testWBLAndMmapReplay(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) {
|
||||
opts := DefaultOptions()
|
||||
opts.OutOfOrderCapMax = 30
|
||||
opts.OutOfOrderTimeWindow = 4 * time.Hour.Milliseconds()
|
||||
|
@ -5369,10 +5425,10 @@ func TestWBLAndMmapReplay(t *testing.T) {
|
|||
key := lbls.String()
|
||||
from, to := minutes(fromMins), minutes(toMins)
|
||||
for min := from; min <= to; min += time.Minute.Milliseconds() {
|
||||
val := rand.Float64()
|
||||
_, err := app.Append(0, lbls, min, val)
|
||||
val := rand.Intn(1000)
|
||||
_, s, err := scenario.appendFunc(app, lbls, min, int64(val))
|
||||
require.NoError(t, err)
|
||||
expSamples[key] = append(expSamples[key], sample{t: min, f: val})
|
||||
expSamples[key] = append(expSamples[key], s)
|
||||
totalSamples++
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -5390,7 +5446,7 @@ func TestWBLAndMmapReplay(t *testing.T) {
|
|||
})
|
||||
exp[k] = v
|
||||
}
|
||||
require.Equal(t, exp, seriesSet)
|
||||
requireEqualSeries(t, exp, seriesSet, true)
|
||||
}
|
||||
|
||||
// In-order samples.
|
||||
|
@ -5413,10 +5469,9 @@ func TestWBLAndMmapReplay(t *testing.T) {
|
|||
chk, err := db.head.chunkDiskMapper.Chunk(mc.ref)
|
||||
require.NoError(t, err)
|
||||
it := chk.Iterator(nil)
|
||||
for it.Next() == chunkenc.ValFloat {
|
||||
ts, val := it.At()
|
||||
s1MmapSamples = append(s1MmapSamples, sample{t: ts, f: val})
|
||||
}
|
||||
smpls, err := storage.ExpandSamples(it, newSample)
|
||||
require.NoError(t, err)
|
||||
s1MmapSamples = append(s1MmapSamples, smpls...)
|
||||
}
|
||||
require.NotEmpty(t, s1MmapSamples)
|
||||
|
||||
|
@ -5534,6 +5589,14 @@ func TestWBLAndMmapReplay(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOOOCompactionFailure(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOCompactionFailure(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -5554,7 +5617,7 @@ func TestOOOCompactionFailure(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, _, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -5642,7 +5705,7 @@ func TestOOOCompactionFailure(t *testing.T) {
|
|||
series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1)
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
series1Samples = append(series1Samples, sample{ts, float64(ts), nil, nil})
|
||||
series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
|
||||
}
|
||||
expRes := map[string][]chunks.Sample{
|
||||
series1.String(): series1Samples,
|
||||
|
@ -5650,9 +5713,8 @@ func TestOOOCompactionFailure(t *testing.T) {
|
|||
|
||||
q, err := NewBlockQuerier(block, math.MinInt64, math.MaxInt64)
|
||||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
// Checking for expected data in the blocks.
|
||||
|
@ -5819,6 +5881,14 @@ func TestWBLCorruption(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOOOMmapCorruption(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOMmapCorruption(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
|
||||
opts := DefaultOptions()
|
||||
|
@ -5838,11 +5908,11 @@ func TestOOOMmapCorruption(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, s, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
|
||||
allSamples = append(allSamples, s)
|
||||
if inMmapAfterCorruption {
|
||||
expInMmapChunks = append(expInMmapChunks, sample{t: ts, f: float64(ts)})
|
||||
expInMmapChunks = append(expInMmapChunks, s)
|
||||
}
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -5880,7 +5950,7 @@ func TestOOOMmapCorruption(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
verifySamples(allSamples)
|
||||
|
@ -5942,6 +6012,14 @@ func TestOOOMmapCorruption(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOutOfOrderRuntimeConfig(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOutOfOrderRuntimeConfig(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) {
|
||||
ctx := context.Background()
|
||||
|
||||
getDB := func(oooTimeWindow int64) *DB {
|
||||
|
@ -5975,10 +6053,10 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, s, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
if success {
|
||||
require.NoError(t, err)
|
||||
allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
|
||||
allSamples = append(allSamples, s)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
@ -6000,7 +6078,7 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
doOOOCompaction := func(t *testing.T, db *DB) {
|
||||
|
@ -6173,12 +6251,20 @@ func TestOutOfOrderRuntimeConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNoGapAfterRestartWithOOO(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testNoGapAfterRestartWithOOO(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
|
||||
series1 := labels.FromStrings("foo", "bar1")
|
||||
addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) {
|
||||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, _, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
if success {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
|
@ -6192,7 +6278,7 @@ func TestNoGapAfterRestartWithOOO(t *testing.T) {
|
|||
var expSamples []chunks.Sample
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
expSamples = append(expSamples, sample{t: ts, f: float64(ts)})
|
||||
expSamples = append(expSamples, scenario.sampleFunc(ts, ts))
|
||||
}
|
||||
|
||||
expRes := map[string][]chunks.Sample{
|
||||
|
@ -6203,7 +6289,7 @@ func TestNoGapAfterRestartWithOOO(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
|
@ -6280,6 +6366,14 @@ func TestNoGapAfterRestartWithOOO(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testWblReplayAfterOOODisableAndRestart(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
|
||||
opts := DefaultOptions()
|
||||
|
@ -6298,9 +6392,9 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, s, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
|
||||
allSamples = append(allSamples, s)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
@ -6323,7 +6417,7 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
|
||||
actRes := query(t, q, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
|
||||
require.Equal(t, expRes, actRes)
|
||||
requireEqualSeries(t, expRes, actRes, true)
|
||||
}
|
||||
|
||||
verifySamples(allSamples)
|
||||
|
@ -6339,6 +6433,14 @@ func TestWblReplayAfterOOODisableAndRestart(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPanicOnApplyConfig(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testPanicOnApplyConfig(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
|
||||
opts := DefaultOptions()
|
||||
|
@ -6357,9 +6459,9 @@ func TestPanicOnApplyConfig(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, s, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
|
||||
allSamples = append(allSamples, s)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
@ -6387,6 +6489,14 @@ func TestPanicOnApplyConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testDiskFillingUpAfterDisablingOOO(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -6406,9 +6516,9 @@ func TestDiskFillingUpAfterDisablingOOO(t *testing.T) {
|
|||
app := db.Appender(context.Background())
|
||||
for min := fromMins; min <= toMins; min++ {
|
||||
ts := min * time.Minute.Milliseconds()
|
||||
_, err := app.Append(0, series1, ts, float64(ts))
|
||||
_, s, err := scenario.appendFunc(app, series1, ts, ts)
|
||||
require.NoError(t, err)
|
||||
allSamples = append(allSamples, sample{t: ts, f: float64(ts)})
|
||||
allSamples = append(allSamples, s)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
@ -7060,12 +7170,6 @@ Outer:
|
|||
require.NoError(t, writerErr)
|
||||
}
|
||||
|
||||
func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) {
|
||||
require.Equal(t, float64(expectedSamples),
|
||||
prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat)),
|
||||
"number of ooo appended samples mismatch")
|
||||
}
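requireEqualOOOSamples reads a single labelled child of a counter vector and compares it as a float. The same pattern works for any CounterVec in tests; a self-contained sketch using only client_golang, with a made-up metric name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	appended := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "demo_ooo_samples_appended_total", Help: "sketch only"},
		[]string{"type"},
	)
	appended.WithLabelValues("float").Add(3)

	// ToFloat64 requires exactly one series, which is why the helper above
	// selects a single label value before converting.
	got := prom_testutil.ToFloat64(appended.WithLabelValues("float"))
	fmt.Println(got == 3) // true
}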
|
||||
|
||||
type mockCompactorFn struct {
|
||||
planFn func() ([]string, error)
|
||||
compactFn func() ([]ulid.ULID, error)
|
||||
|
|
21
tsdb/head.go
|
@ -1759,12 +1759,12 @@ type seriesHashmap struct {
|
|||
|
||||
func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries {
|
||||
if s, found := m.unique[hash]; found {
|
||||
if labels.Equal(s.lset, lset) {
|
||||
if labels.Equal(s.labels(), lset) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
for _, s := range m.conflicts[hash] {
|
||||
if labels.Equal(s.lset, lset) {
|
||||
if labels.Equal(s.labels(), lset) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
|
@ -1772,7 +1772,7 @@ func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries {
|
|||
}
|
||||
|
||||
func (m *seriesHashmap) set(hash uint64, s *memSeries) {
|
||||
if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) {
|
||||
if existing, found := m.unique[hash]; !found || labels.Equal(existing.labels(), s.labels()) {
|
||||
m.unique[hash] = s
|
||||
return
|
||||
}
|
||||
|
@ -1781,7 +1781,7 @@ func (m *seriesHashmap) set(hash uint64, s *memSeries) {
|
|||
}
|
||||
l := m.conflicts[hash]
|
||||
for i, prev := range l {
|
||||
if labels.Equal(prev.lset, s.lset) {
|
||||
if labels.Equal(prev.labels(), s.labels()) {
|
||||
l[i] = s
|
||||
return
|
||||
}
|
||||
|
@ -1931,7 +1931,7 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (
|
|||
series.lset.Range(func(l labels.Label) { affected[l] = struct{}{} })
|
||||
s.hashes[hashShard].del(hash, series.ref)
|
||||
delete(s.series[refShard], series.ref)
|
||||
deletedForCallback[series.ref] = series.lset
|
||||
deletedForCallback[series.ref] = series.lset // OK to access lset; series is locked at the top of this function.
|
||||
}
|
||||
|
||||
s.iterForDeletion(check)
|
||||
|
@ -2023,7 +2023,7 @@ func (s *stripeSeries) getOrSet(hash uint64, lset labels.Labels, createSeries fu
|
|||
}
|
||||
// Setting the series in the s.hashes marks the creation of series
|
||||
// as any further calls to this methods would return that series.
|
||||
s.seriesLifecycleCallback.PostCreation(series.lset)
|
||||
s.seriesLifecycleCallback.PostCreation(series.labels())
|
||||
|
||||
i = uint64(series.ref) & uint64(s.size-1)
|
||||
|
||||
|
@ -2064,16 +2064,19 @@ func (s sample) Type() chunkenc.ValueType {
|
|||
// memSeries is the in-memory representation of a series. None of its methods
|
||||
// are goroutine safe and it is the caller's responsibility to lock it.
|
||||
type memSeries struct {
|
||||
sync.Mutex
|
||||
|
||||
// Members up to the Mutex are not changed after construction, so can be accessed without a lock.
|
||||
ref chunks.HeadSeriesRef
|
||||
lset labels.Labels
|
||||
meta *metadata.Metadata
|
||||
|
||||
// Series labels hash to use for sharding purposes. The value is always 0 when sharding has not
|
||||
// been explicitly enabled in TSDB.
|
||||
shardHash uint64
|
||||
|
||||
// Everything after here should only be accessed with the lock held.
|
||||
sync.Mutex
|
||||
|
||||
lset labels.Labels // Locking required with -tags dedupelabels, not otherwise.
|
||||
|
||||
// Immutable chunks on disk that have not yet gone into a block, in order of ascending time stamps.
|
||||
// When compaction runs, chunks get moved into a block and all pointers are shifted like so:
|
||||
//
|
||||
|
|
|
@ -554,7 +554,7 @@ func (a *headAppender) AppendExemplar(ref storage.SeriesRef, lset labels.Labels,
|
|||
// Ensure no empty labels have gotten through.
|
||||
e.Labels = e.Labels.WithoutEmpty()
|
||||
|
||||
err := a.head.exemplars.ValidateExemplar(s.lset, e)
|
||||
err := a.head.exemplars.ValidateExemplar(s.labels(), e)
|
||||
if err != nil {
|
||||
if errors.Is(err, storage.ErrDuplicateExemplar) || errors.Is(err, storage.ErrExemplarsDisabled) {
|
||||
// Duplicate, don't return an error but don't accept the exemplar.
|
||||
|
@ -708,7 +708,7 @@ func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRe
|
|||
return 0, labels.EmptyLabels()
|
||||
}
|
||||
// returned labels must be suitable to pass to Append()
|
||||
return storage.SeriesRef(s.ref), s.lset
|
||||
return storage.SeriesRef(s.ref), s.labels()
|
||||
}
|
||||
|
||||
// log writes all headAppender's data to the WAL.
|
||||
|
@ -816,7 +816,7 @@ func (a *headAppender) Commit() (err error) {
|
|||
continue
|
||||
}
|
||||
// We don't instrument exemplar appends here, all is instrumented by storage.
|
||||
if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil {
|
||||
if err := a.head.exemplars.AddExemplar(s.labels(), e.exemplar); err != nil {
|
||||
if errors.Is(err, storage.ErrOutOfOrderExemplar) {
|
||||
continue
|
||||
}
|
||||
|
|
95
tsdb/head_dedupelabels.go
Normal file
|
@ -0,0 +1,95 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build dedupelabels
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
// Helper method to access labels under lock.
|
||||
func (s *memSeries) labels() labels.Labels {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.lset
|
||||
}
|
||||
|
||||
// RebuildSymbolTable goes through all the series in h, build a SymbolTable with all names and values,
|
||||
// replace each series' Labels with one using that SymbolTable.
|
||||
func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable {
|
||||
level.Info(logger).Log("msg", "RebuildSymbolTable starting")
|
||||
st := labels.NewSymbolTable()
|
||||
builder := labels.NewScratchBuilderWithSymbolTable(st, 0)
|
||||
rebuildLabels := func(lbls labels.Labels) labels.Labels {
|
||||
builder.Reset()
|
||||
lbls.Range(func(l labels.Label) {
|
||||
builder.Add(l.Name, l.Value)
|
||||
})
|
||||
return builder.Labels()
|
||||
}
|
||||
|
||||
for i := 0; i < h.series.size; i++ {
|
||||
h.series.locks[i].Lock()
|
||||
|
||||
for _, s := range h.series.hashes[i].unique {
|
||||
s.Lock()
|
||||
s.lset = rebuildLabels(s.lset)
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
for _, all := range h.series.hashes[i].conflicts {
|
||||
for _, s := range all {
|
||||
s.Lock()
|
||||
s.lset = rebuildLabels(s.lset)
|
||||
s.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
h.series.locks[i].Unlock()
|
||||
}
|
||||
type withReset interface{ ResetSymbolTable(*labels.SymbolTable) }
|
||||
if e, ok := h.exemplars.(withReset); ok {
|
||||
e.ResetSymbolTable(st)
|
||||
}
|
||||
level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", st.Len())
|
||||
return st
|
||||
}
|
||||
|
||||
func (ce *CircularExemplarStorage) ResetSymbolTable(st *labels.SymbolTable) {
|
||||
builder := labels.NewScratchBuilderWithSymbolTable(st, 0)
|
||||
rebuildLabels := func(lbls labels.Labels) labels.Labels {
|
||||
builder.Reset()
|
||||
lbls.Range(func(l labels.Label) {
|
||||
builder.Add(l.Name, l.Value)
|
||||
})
|
||||
return builder.Labels()
|
||||
}
|
||||
|
||||
ce.lock.RLock()
|
||||
defer ce.lock.RUnlock()
|
||||
|
||||
for _, v := range ce.index {
|
||||
v.seriesLabels = rebuildLabels(v.seriesLabels)
|
||||
}
|
||||
for i := range ce.exemplars {
|
||||
if ce.exemplars[i].ref == nil {
|
||||
continue
|
||||
}
|
||||
ce.exemplars[i].exemplar.Labels = rebuildLabels(ce.exemplars[i].exemplar.Labels)
|
||||
}
|
||||
}
|
32
tsdb/head_other.go
Normal file
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build !dedupelabels
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"github.com/go-kit/log"
|
||||
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
)
|
||||
|
||||
// Helper method to access labels; trivial when not using dedupelabels.
|
||||
func (s *memSeries) labels() labels.Labels {
|
||||
return s.lset
|
||||
}
|
||||
|
||||
// No-op when not using dedupelabels.
|
||||
func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable {
|
||||
return nil
|
||||
}
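The two new files split label access on the dedupelabels build tag: with the tag, lset can be rewritten against a shared SymbolTable and must be read through labels(); without it, labels() is a plain field read and RebuildSymbolTable is a no-op. The interning loop in head_dedupelabels.go boils down to the standalone pattern below; with the default build tags the symbol table is a stub, so actual deduplication only takes effect when building with -tags dedupelabels.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Same interning pattern RebuildSymbolTable uses above: copy every label
	// set through a ScratchBuilder that is backed by one shared SymbolTable.
	st := labels.NewSymbolTable()
	b := labels.NewScratchBuilderWithSymbolTable(st, 0)

	rebuild := func(ls labels.Labels) labels.Labels {
		b.Reset()
		ls.Range(func(l labels.Label) { b.Add(l.Name, l.Value) })
		return b.Labels()
	}

	fmt.Println(rebuild(labels.FromStrings("job", "node", "instance", "a:9100")))
	fmt.Println(rebuild(labels.FromStrings("job", "node", "instance", "b:9100")))
}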
|
|
@ -142,7 +142,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings {
|
|||
}
|
||||
|
||||
slices.SortFunc(series, func(a, b *memSeries) int {
|
||||
return labels.Compare(a.lset, b.lset)
|
||||
return labels.Compare(a.labels(), b.labels())
|
||||
})
|
||||
|
||||
// Convert back to list.
|
||||
|
@ -189,7 +189,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB
|
|||
h.head.metrics.seriesNotFound.Inc()
|
||||
return storage.ErrNotFound
|
||||
}
|
||||
builder.Assign(s.lset)
|
||||
builder.Assign(s.labels())
|
||||
|
||||
if chks == nil {
|
||||
return nil
|
||||
|
@ -259,7 +259,7 @@ func (h *headIndexReader) LabelValueFor(_ context.Context, id storage.SeriesRef,
|
|||
return "", storage.ErrNotFound
|
||||
}
|
||||
|
||||
value := memSeries.lset.Get(label)
|
||||
value := memSeries.labels().Get(label)
|
||||
if value == "" {
|
||||
return "", storage.ErrNotFound
|
||||
}
|
||||
|
@ -283,7 +283,7 @@ func (h *headIndexReader) LabelNamesFor(ctx context.Context, series index.Postin
|
|||
// when series was garbage collected after the caller got the series IDs.
|
||||
continue
|
||||
}
|
||||
memSeries.lset.Range(func(lbl labels.Label) {
|
||||
memSeries.labels().Range(func(lbl labels.Label) {
|
||||
namesMap[lbl.Name] = struct{}{}
|
||||
})
|
||||
}
|
||||
|
@ -487,55 +487,24 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
|
|||
|
||||
// We create a temporary slice of chunk metas to hold the information of all
|
||||
// possible chunks that may overlap with the requested chunk.
|
||||
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks))
|
||||
|
||||
oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
|
||||
if s.ooo.oooHeadChunk != nil && s.ooo.oooHeadChunk.OverlapsClosedInterval(mint, maxt) {
|
||||
// We only want to append the head chunk if this chunk existed when
|
||||
// Series() was called. This brings consistency in case new data
|
||||
// is added in between Series() and Chunk() calls.
|
||||
if oooHeadRef == meta.OOOLastRef {
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
|
||||
meta: chunks.Meta{
|
||||
// Ignoring samples added before and after the last known min and max time for this chunk.
|
||||
MinTime: meta.OOOLastMinTime,
|
||||
MaxTime: meta.OOOLastMaxTime,
|
||||
Ref: oooHeadRef,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1)
|
||||
|
||||
for i, c := range s.ooo.oooMmappedChunks {
|
||||
chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
|
||||
// We can skip chunks that came in later than the last known OOOLastRef.
|
||||
if chunkRef > meta.OOOLastRef {
|
||||
break
|
||||
}
|
||||
|
||||
switch {
|
||||
case chunkRef == meta.OOOLastRef:
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
|
||||
meta: chunks.Meta{
|
||||
MinTime: meta.OOOLastMinTime,
|
||||
MaxTime: meta.OOOLastMaxTime,
|
||||
Ref: chunkRef,
|
||||
},
|
||||
ref: c.ref,
|
||||
origMinT: c.minTime,
|
||||
origMaxT: c.maxTime,
|
||||
})
|
||||
case c.OverlapsClosedInterval(mint, maxt):
|
||||
if c.OverlapsClosedInterval(mint, maxt) {
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{
|
||||
meta: chunks.Meta{
|
||||
MinTime: c.minTime,
|
||||
MaxTime: c.maxTime,
|
||||
Ref: chunkRef,
|
||||
Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))),
|
||||
},
|
||||
ref: c.ref,
|
||||
})
|
||||
}
|
||||
}
|
||||
// Add in data copied from the head OOO chunk.
|
||||
if meta.Chunk != nil {
|
||||
tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta})
|
||||
}
|
||||
|
||||
// Next we want to sort all the collected chunks by min time so we can find
|
||||
// those that overlap and stop when we know the rest don't.
|
||||
|
@ -548,22 +517,8 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
|
|||
continue
|
||||
}
|
||||
var iterable chunkenc.Iterable
|
||||
if c.meta.Ref == oooHeadRef {
|
||||
var xor *chunkenc.XORChunk
|
||||
var err error
|
||||
// If head chunk min and max time match the meta OOO markers
|
||||
// that means that the chunk has not expanded so we can append
|
||||
// it as it is.
|
||||
if s.ooo.oooHeadChunk.minTime == meta.OOOLastMinTime && s.ooo.oooHeadChunk.maxTime == meta.OOOLastMaxTime {
|
||||
xor, err = s.ooo.oooHeadChunk.chunk.ToXOR() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXOR() function only for the usecase where Bytes() is called.
|
||||
} else {
|
||||
// We need to remove samples that are outside of the markers
|
||||
xor, err = s.ooo.oooHeadChunk.chunk.ToXORBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to convert ooo head chunk to xor chunk: %w", err)
|
||||
}
|
||||
iterable = xor
|
||||
if c.meta.Chunk != nil {
|
||||
iterable = c.meta.Chunk
|
||||
} else {
|
||||
chk, err := cdm.Chunk(c.ref)
|
||||
if err != nil {
|
||||
|
@ -573,16 +528,7 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
|
|||
}
|
||||
return nil, err
|
||||
}
|
||||
if c.meta.Ref == meta.OOOLastRef &&
|
||||
(c.origMinT != meta.OOOLastMinTime || c.origMaxT != meta.OOOLastMaxTime) {
|
||||
// The head expanded and was memory mapped so now we need to
|
||||
// wrap the chunk within a chunk that doesnt allows us to iterate
|
||||
// through samples out of the OOOLastMinT and OOOLastMaxT
|
||||
// markers.
|
||||
iterable = boundedIterable{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime}
|
||||
} else {
|
||||
iterable = chk
|
||||
}
|
||||
iterable = chk
|
||||
}
|
||||
mc.chunkIterables = append(mc.chunkIterables, iterable)
|
||||
if c.meta.MaxTime > absoluteMax {
|
||||
|
@ -593,74 +539,6 @@ func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMappe
|
|||
return mc, nil
|
||||
}
|
||||
|
||||
var _ chunkenc.Iterable = &boundedIterable{}
|
||||
|
||||
// boundedIterable is an implementation of chunkenc.Iterable that uses a
|
||||
// boundedIterator that only iterates through samples which timestamps are
|
||||
// >= minT and <= maxT.
|
||||
type boundedIterable struct {
|
||||
chunk chunkenc.Chunk
|
||||
minT int64
|
||||
maxT int64
|
||||
}
|
||||
|
||||
func (b boundedIterable) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator {
|
||||
it := b.chunk.Iterator(iterator)
|
||||
if it == nil {
|
||||
panic("iterator shouldn't be nil")
|
||||
}
|
||||
return boundedIterator{it, b.minT, b.maxT}
|
||||
}
|
||||
|
||||
var _ chunkenc.Iterator = &boundedIterator{}
|
||||
|
||||
// boundedIterator is an implementation of Iterator that only iterates through
|
||||
// samples which timestamps are >= minT and <= maxT.
|
||||
type boundedIterator struct {
|
||||
chunkenc.Iterator
|
||||
minT int64
|
||||
maxT int64
|
||||
}
|
||||
|
||||
// Next the first time its called it will advance as many positions as necessary
|
||||
// until its able to find a sample within the bounds minT and maxT.
|
||||
// If there are samples within bounds it will advance one by one amongst them.
|
||||
// If there are no samples within bounds it will return false.
|
||||
func (b boundedIterator) Next() chunkenc.ValueType {
|
||||
for b.Iterator.Next() == chunkenc.ValFloat {
|
||||
t, _ := b.Iterator.At()
|
||||
switch {
|
||||
case t < b.minT:
|
||||
continue
|
||||
case t > b.maxT:
|
||||
return chunkenc.ValNone
|
||||
default:
|
||||
return chunkenc.ValFloat
|
||||
}
|
||||
}
|
||||
return chunkenc.ValNone
|
||||
}
|
||||
|
||||
func (b boundedIterator) Seek(t int64) chunkenc.ValueType {
|
||||
if t < b.minT {
|
||||
// We must seek at least up to b.minT if it is asked for something before that.
|
||||
val := b.Iterator.Seek(b.minT)
|
||||
if !(val == chunkenc.ValFloat) {
|
||||
return chunkenc.ValNone
|
||||
}
|
||||
t, _ := b.Iterator.At()
|
||||
if t <= b.maxT {
|
||||
return chunkenc.ValFloat
|
||||
}
|
||||
}
|
||||
if t > b.maxT {
|
||||
// We seek anyway so that the subsequent Next() calls will also return false.
|
||||
b.Iterator.Seek(t)
|
||||
return chunkenc.ValNone
|
||||
}
|
||||
return b.Iterator.Seek(t)
|
||||
}
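With the head OOO chunk now carried along as meta.Chunk, the merged-chunks path above iterates that chunk directly, which is why the boundedIterable and boundedIterator wrappers removed here are no longer needed. A minimal, self-contained example of that direct iteration path over an XOR chunk:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Build a small XOR chunk and read it back through its own iterator,
	// the same direct path oooMergedChunks now takes via c.meta.Chunk.
	chk := chunkenc.NewXORChunk()
	app, _ := chk.Appender()
	for ts := int64(0); ts < 5; ts++ {
		app.Append(ts, float64(ts))
	}

	it := chk.Iterator(nil)
	for it.Next() == chunkenc.ValFloat {
		ts, v := it.At()
		fmt.Println(ts, v)
	}
}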
|
||||
|
||||
// safeHeadChunk makes sure that the chunk can be accessed without a race condition.
|
||||
type safeHeadChunk struct {
|
||||
chunkenc.Chunk
|
||||
|
|
|
@ -15,7 +15,6 @@ package tsdb
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
|
@ -26,150 +25,6 @@ import (
|
|||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
)
|
||||
|
||||
func TestBoundedChunk(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inputChunk chunkenc.Chunk
|
||||
inputMinT int64
|
||||
inputMaxT int64
|
||||
initialSeek int64
|
||||
seekIsASuccess bool
|
||||
expSamples []sample
|
||||
}{
|
||||
{
|
||||
name: "if there are no samples it returns nothing",
|
||||
inputChunk: newTestChunk(0),
|
||||
expSamples: nil,
|
||||
},
|
||||
{
|
||||
name: "bounds represent a single sample",
|
||||
inputChunk: newTestChunk(10),
|
||||
expSamples: []sample{
|
||||
{0, 0, nil, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if there are bounds set only samples within them are returned",
|
||||
inputChunk: newTestChunk(10),
|
||||
inputMinT: 1,
|
||||
inputMaxT: 8,
|
||||
expSamples: []sample{
|
||||
{1, 1, nil, nil},
|
||||
{2, 2, nil, nil},
|
||||
{3, 3, nil, nil},
|
||||
{4, 4, nil, nil},
|
||||
{5, 5, nil, nil},
|
||||
{6, 6, nil, nil},
|
||||
{7, 7, nil, nil},
|
||||
{8, 8, nil, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if bounds set and only maxt is less than actual maxt",
|
||||
inputChunk: newTestChunk(10),
|
||||
inputMinT: 0,
|
||||
inputMaxT: 5,
|
||||
expSamples: []sample{
|
||||
{0, 0, nil, nil},
|
||||
{1, 1, nil, nil},
|
||||
{2, 2, nil, nil},
|
||||
{3, 3, nil, nil},
|
||||
{4, 4, nil, nil},
|
||||
{5, 5, nil, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if bounds set and only mint is more than actual mint",
|
||||
inputChunk: newTestChunk(10),
|
||||
inputMinT: 5,
|
||||
inputMaxT: 9,
|
||||
expSamples: []sample{
|
||||
{5, 5, nil, nil},
|
||||
{6, 6, nil, nil},
|
||||
{7, 7, nil, nil},
|
||||
{8, 8, nil, nil},
|
||||
{9, 9, nil, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if there are bounds set with seek before mint",
|
||||
inputChunk: newTestChunk(10),
|
||||
inputMinT: 3,
|
||||
inputMaxT: 7,
|
||||
initialSeek: 1,
|
||||
seekIsASuccess: true,
|
||||
expSamples: []sample{
|
||||
{3, 3, nil, nil},
|
||||
{4, 4, nil, nil},
|
||||
{5, 5, nil, nil},
|
||||
{6, 6, nil, nil},
|
||||
{7, 7, nil, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if there are bounds set with seek between mint and maxt",
|
||||
inputChunk: newTestChunk(10),
|
||||
inputMinT: 3,
|
||||
inputMaxT: 7,
|
||||
initialSeek: 5,
|
||||
seekIsASuccess: true,
|
||||
expSamples: []sample{
|
||||
{5, 5, nil, nil},
|
||||
{6, 6, nil, nil},
|
||||
{7, 7, nil, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "if there are bounds set with seek after maxt",
|
||||
inputChunk: newTestChunk(10),
|
||||
inputMinT: 3,
|
||||
inputMaxT: 7,
|
||||
initialSeek: 8,
|
||||
seekIsASuccess: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
|
||||
iterable := boundedIterable{tc.inputChunk, tc.inputMinT, tc.inputMaxT}
|
||||
|
||||
var samples []sample
|
||||
it := iterable.Iterator(nil)
|
||||
|
||||
if tc.initialSeek != 0 {
|
||||
// Testing Seek()
|
||||
val := it.Seek(tc.initialSeek)
|
||||
require.Equal(t, tc.seekIsASuccess, val == chunkenc.ValFloat)
|
||||
if val == chunkenc.ValFloat {
|
||||
t, v := it.At()
|
||||
samples = append(samples, sample{t, v, nil, nil})
|
||||
}
|
||||
}
|
||||
|
||||
// Testing Next()
|
||||
for it.Next() == chunkenc.ValFloat {
|
||||
t, v := it.At()
|
||||
samples = append(samples, sample{t, v, nil, nil})
|
||||
}
|
||||
|
||||
// it.Next() should keep returning no value.
|
||||
for i := 0; i < 10; i++ {
|
||||
require.Equal(t, chunkenc.ValNone, it.Next())
|
||||
}
|
||||
|
||||
require.Equal(t, tc.expSamples, samples)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newTestChunk(numSamples int) chunkenc.Chunk {
|
||||
xor := chunkenc.NewXORChunk()
|
||||
a, _ := xor.Appender()
|
||||
for i := 0; i < numSamples; i++ {
|
||||
a.Append(int64(i), float64(i))
|
||||
}
|
||||
return xor
|
||||
}
|
||||
|
||||
// TestMemSeries_chunk runs a series of tests on memSeries.chunk() calls.
|
||||
// It will simulate various conditions to ensure all code paths in that function are covered.
|
||||
func TestMemSeries_chunk(t *testing.T) {
|
||||
|
|
|
@ -2665,6 +2665,14 @@ func TestIsolationWithoutAdd(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOutOfOrderSamplesMetric(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOutOfOrderSamplesMetric(t, scenario)
|
||||
})
|
||||
}
|
||||
}
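testOutOfOrderSamplesMetric below drives the out-of-order and out-of-bound counters through a real DB. A condensed, runnable version of its first assertion, using the TSDB APIs visible in these hunks plus standard-library setup: open with default options, commit an in-order sample, then append an older one and observe the error.

package main

import (
	"context"
	"errors"
	"fmt"
	"os"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb"
)

func main() {
	dir, err := os.MkdirTemp("", "ooo-metric-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	db, err := tsdb.Open(dir, nil, nil, tsdb.DefaultOptions(), nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	lbls := labels.FromStrings("a", "b")

	// Commit an in-order sample first, like the loop in the test above.
	app := db.Appender(context.Background())
	if _, err := app.Append(0, lbls, 5, 99); err != nil {
		panic(err)
	}
	if err := app.Commit(); err != nil {
		panic(err)
	}

	// A second appender going back in time is rejected because the default
	// options leave OutOfOrderTimeWindow at 0.
	app = db.Appender(context.Background())
	_, err = app.Append(0, lbls, 2, 99)
	fmt.Println(errors.Is(err, storage.ErrOutOfOrderSample)) // true
	_ = app.Rollback()
}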
|
||||
|
||||
func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
|
||||
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
|
||||
|
@ -2674,33 +2682,38 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
	}()
	db.DisableCompactions()

	appendSample := func(appender storage.Appender, ts int64) (storage.SeriesRef, error) {
		ref, _, err := scenario.appendFunc(appender, labels.FromStrings("a", "b"), ts, 99)
		return ref, err
	}

ctx := context.Background()
|
||||
app := db.Appender(ctx)
|
||||
for i := 1; i <= 5; i++ {
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), int64(i), 99)
|
||||
_, err = appendSample(app, int64(i))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// Test out of order metric.
|
||||
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
app = db.Appender(ctx)
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), 2, 99)
|
||||
_, err = appendSample(app, 2)
|
||||
require.Equal(t, storage.ErrOutOfOrderSample, err)
|
||||
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), 3, 99)
|
||||
_, err = appendSample(app, 3)
|
||||
require.Equal(t, storage.ErrOutOfOrderSample, err)
|
||||
require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), 4, 99)
|
||||
_, err = appendSample(app, 4)
|
||||
require.Equal(t, storage.ErrOutOfOrderSample, err)
|
||||
require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// Compact Head to test out of bound metric.
|
||||
app = db.Appender(ctx)
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), DefaultBlockDuration*2, 99)
|
||||
_, err = appendSample(app, DefaultBlockDuration*2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
|
@ -2709,36 +2722,36 @@ func TestOutOfOrderSamplesMetric(t *testing.T) {
|
|||
require.Greater(t, db.head.minValidTime.Load(), int64(0))
|
||||
|
||||
app = db.Appender(ctx)
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()-2, 99)
|
||||
_, err = appendSample(app, db.head.minValidTime.Load()-2)
|
||||
require.Equal(t, storage.ErrOutOfBounds, err)
|
||||
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)))
|
||||
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()-1, 99)
|
||||
_, err = appendSample(app, db.head.minValidTime.Load()-1)
|
||||
require.Equal(t, storage.ErrOutOfBounds, err)
|
||||
require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfBoundSamples.WithLabelValues(scenario.sampleType)))
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// Some more valid samples for out of order.
|
||||
app = db.Appender(ctx)
|
||||
for i := 1; i <= 5; i++ {
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+int64(i), 99)
|
||||
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+int64(i))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// Test out of order metric.
|
||||
app = db.Appender(ctx)
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+2, 99)
|
||||
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+2)
|
||||
require.Equal(t, storage.ErrOutOfOrderSample, err)
|
||||
require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+3, 99)
|
||||
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+3)
|
||||
require.Equal(t, storage.ErrOutOfOrderSample, err)
|
||||
require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
|
||||
_, err = app.Append(0, labels.FromStrings("a", "b"), db.head.minValidTime.Load()+DefaultBlockDuration+4, 99)
|
||||
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+4)
|
||||
require.Equal(t, storage.ErrOutOfOrderSample, err)
|
||||
require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat)))
|
||||
require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
|
@ -4801,6 +4814,14 @@ func TestWBLReplay(t *testing.T) {
|
|||
|
||||
// TestOOOMmapReplay checks the replay at a low level.
|
||||
func TestOOOMmapReplay(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOMmapReplay(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
|
||||
require.NoError(t, err)
|
||||
|
@ -4820,8 +4841,7 @@ func TestOOOMmapReplay(t *testing.T) {
|
|||
l := labels.FromStrings("foo", "bar")
|
||||
appendSample := func(mins int64) {
|
||||
app := h.Appender(context.Background())
|
||||
ts, v := mins*time.Minute.Milliseconds(), float64(mins)
|
||||
_, err := app.Append(0, l, ts, v)
|
||||
_, _, err := scenario.appendFunc(app, l, mins*time.Minute.Milliseconds(), mins)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
@ -5096,6 +5116,14 @@ func TestReplayAfterMmapReplayError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOOOAppendWithNoSeries(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOAppendWithNoSeries(t, scenario.appendFunc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)) {
|
||||
dir := t.TempDir()
|
||||
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
|
||||
require.NoError(t, err)
|
||||
|
@ -5116,7 +5144,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
|
|||
|
||||
appendSample := func(lbls labels.Labels, ts int64) {
|
||||
app := h.Appender(context.Background())
|
||||
_, err := app.Append(0, lbls, ts*time.Minute.Milliseconds(), float64(ts))
|
||||
_, _, err := appendFunc(app, lbls, ts*time.Minute.Milliseconds(), ts)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
@ -5164,7 +5192,7 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
|
|||
// Now 179m is too old.
|
||||
s4 := newLabels(4)
|
||||
app := h.Appender(context.Background())
|
||||
_, err = app.Append(0, s4, 179*time.Minute.Milliseconds(), float64(179))
|
||||
_, _, err = appendFunc(app, s4, 179*time.Minute.Milliseconds(), 179)
|
||||
require.Equal(t, storage.ErrTooOldSample, err)
|
||||
require.NoError(t, app.Rollback())
|
||||
verifyOOOSamples(s3, 1)
|
||||
|
@ -5177,6 +5205,14 @@ func TestOOOAppendWithNoSeries(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHeadMinOOOTimeUpdate(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testHeadMinOOOTimeUpdate(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testHeadMinOOOTimeUpdate(t *testing.T, scenario sampleTypeScenario) {
|
||||
dir := t.TempDir()
|
||||
wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
|
||||
require.NoError(t, err)
|
||||
|
@ -5195,15 +5231,13 @@ func TestHeadMinOOOTimeUpdate(t *testing.T) {
|
|||
require.NoError(t, h.Init(0))
|
||||
|
||||
appendSample := func(ts int64) {
|
||||
lbls := labels.FromStrings("foo", "bar")
|
||||
app := h.Appender(context.Background())
|
||||
_, err := app.Append(0, lbls, ts*time.Minute.Milliseconds(), float64(ts))
|
||||
_, _, err = scenario.appendFunc(app, labels.FromStrings("a", "b"), ts*time.Minute.Milliseconds(), ts)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
appendSample(300) // In-order sample.
|
||||
|
||||
require.Equal(t, int64(math.MaxInt64), h.MinOOOTime())
|
||||
|
||||
appendSample(295) // OOO sample.
|
||||
|
|
|
@ -126,7 +126,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
			}
			// At the moment the only possible error here is out of order exemplars, which we shouldn't see when
			// replaying the WAL, so let's just log the error if it's not that type.
			err = h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
			err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels})
			if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) {
				level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err)
			}
@ -448,7 +448,7 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m
) {
	level.Debug(h.logger).Log(
		"msg", "M-mapped chunks overlap on a duplicate series record",
		"series", mSeries.lset.String(),
		"series", mSeries.labels().String(),
		"oldref", mSeries.ref,
		"oldmint", mSeries.mmappedChunks[0].minTime,
		"oldmaxt", mSeries.mmappedChunks[len(mSeries.mmappedChunks)-1].maxTime,
@ -932,7 +932,7 @@ func (s *memSeries) encodeToSnapshotRecord(b []byte) []byte {

	buf.PutByte(chunkSnapshotRecordTypeSeries)
	buf.PutBE64(uint64(s.ref))
	record.EncodeLabels(&buf, s.lset)
	record.EncodeLabels(&buf, s.labels())
	buf.PutBE64int64(0) // Backwards-compatibility; was chunkRange but now unused.

	s.Lock()
@ -1485,7 +1485,7 @@ Outer:
				continue
			}

			if err := h.exemplars.AddExemplar(ms.lset, exemplar.Exemplar{
			if err := h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{
				Labels: e.Labels,
				Value:  e.V,
				Ts:     e.T,
@ -755,9 +755,7 @@ func (it *ListPostings) Seek(x storage.SeriesRef) bool {
	}

	// Do binary search between current position and end.
	i := sort.Search(len(it.list), func(i int) bool {
		return it.list[i] >= x
	})
	i, _ := slices.BinarySearch(it.list, x)
	if i < len(it.list) {
		it.cur = it.list[i]
		it.list = it.list[i+1:]
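The hunk above swaps a hand-rolled sort.Search predicate for the generic slices.BinarySearch from the standard library. A minimal sketch of the two call shapes (illustrative only, not part of this change):

// Sketch only.
list := []storage.SeriesRef{2, 5, 9}

// Old shape: smallest index for which the predicate holds.
i1 := sort.Search(len(list), func(i int) bool { return list[i] >= 5 }) // 1

// New shape: index plus a found flag; the index is the same insertion
// point when the value is absent.
i2, found := slices.BinarySearch(list, storage.SeriesRef(5)) // 1, true
_, _, _ = i1, i2, found
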
@ -78,7 +78,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
|
|||
oh.head.metrics.seriesNotFound.Inc()
|
||||
return storage.ErrNotFound
|
||||
}
|
||||
builder.Assign(s.lset)
|
||||
builder.Assign(s.labels())
|
||||
|
||||
if chks == nil {
|
||||
return nil
|
||||
|
@ -94,48 +94,32 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
	tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks))

	// We define these markers to track the last chunk reference while we
	// fill the chunk meta.
	// These markers are useful to give consistent responses to repeated queries
	// even if new chunks that might be overlapping or not are added afterwards.
	// Also, lastMinT and lastMaxT are initialized to the max int as a sentinel
	// value to know they are unset.
	var lastChunkRef chunks.ChunkRef
	lastMinT, lastMaxT := int64(math.MaxInt64), int64(math.MaxInt64)

	addChunk := func(minT, maxT int64, ref chunks.ChunkRef) {
		// the first time we get called is for the last included chunk.
		// set the markers accordingly
		if lastMinT == int64(math.MaxInt64) {
			lastChunkRef = ref
			lastMinT = minT
			lastMaxT = maxT
		}

	addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) {
		tmpChks = append(tmpChks, chunks.Meta{
			MinTime:        minT,
			MaxTime:        maxT,
			Ref:            ref,
			OOOLastRef:     lastChunkRef,
			OOOLastMinTime: lastMinT,
			OOOLastMaxTime: lastMaxT,
			MinTime: minT,
			MaxTime: maxT,
			Ref:     ref,
			Chunk:   chunk,
		})
	}

	// Collect all chunks that overlap the query range, in order from most recent to most old,
	// so we can set the correct markers.
	// Collect all chunks that overlap the query range.
	if s.ooo.oooHeadChunk != nil {
		c := s.ooo.oooHeadChunk
		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
			addChunk(c.minTime, c.maxTime, ref)
			var xor chunkenc.Chunk
			if len(c.chunk.samples) > 0 { // Empty samples happen in tests, at least.
				xor, _ = c.chunk.ToXOR() // Ignoring error because it can't fail.
			}
			addChunk(c.minTime, c.maxTime, ref, xor)
		}
	}
	for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
		c := s.ooo.oooMmappedChunks[i]
		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i)))
			addChunk(c.minTime, c.maxTime, ref)
			addChunk(c.minTime, c.maxTime, ref, nil)
		}
	}
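The chunk collection above keeps exactly the chunks whose closed time range intersects the query range. A hedged sketch of that closed-interval overlap test (the helper name and signature are illustrative, not taken from this change):

// Illustrative only: the closed-interval overlap test behind
// OverlapsClosedInterval-style checks above.
func overlaps(mint1, maxt1, mint2, maxt2 int64) bool {
	return mint1 <= maxt2 && mint2 <= maxt1
}
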
@ -163,6 +147,12 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
		case c.MaxTime > maxTime:
			maxTime = c.MaxTime
			(*chks)[len(*chks)-1].MaxTime = c.MaxTime
			fallthrough
		default:
			// If the head OOO chunk is part of an output chunk, copy the chunk pointer.
			if c.Chunk != nil {
				(*chks)[len(*chks)-1].Chunk = c.Chunk
			}
		}
	}

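Note how the new case relies on Go's fallthrough: the MaxTime update runs first, then control drops into the default branch that copies the chunk pointer. A standalone sketch of that control flow (values and strings are illustrative only):

// Illustrative only: fallthrough transfers control to the next case body
// unconditionally, so both statements print for x > 1.
switch x := 2; {
case x > 1:
	fmt.Println("extend output chunk max time")
	fallthrough
default:
	fmt.Println("copy chunk pointer if set")
}
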
@ -185,10 +175,8 @@ func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matc
}

type chunkMetaAndChunkDiskMapperRef struct {
	meta     chunks.Meta
	ref      chunks.ChunkDiskMapperRef
	origMinT int64
	origMaxT int64
	meta chunks.Meta
	ref  chunks.ChunkDiskMapperRef
}

func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int {
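refLessByMinTimeAndMinRef follows Go's cmp-style comparator contract (negative, zero, positive), which lets it plug straight into slices.SortFunc. A hedged sketch of that shape; elem and byMinTime below are stand-ins, not names from this change:

// Sketch only.
type elem struct{ minT int64 }

func byMinTime(a, b elem) int {
	switch {
	case a.minT < b.minT:
		return -1
	case a.minT > b.minT:
		return 1
	default:
		return 0
	}
}

// Usage: slices.SortFunc(xs, byMinTime)
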
@ -304,18 +304,6 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
|
|||
s1, _, _ := h.getOrCreate(s1ID, s1Lset)
|
||||
s1.ooo = &memSeriesOOOFields{}
|
||||
|
||||
var lastChunk chunkInterval
|
||||
var lastChunkPos int
|
||||
|
||||
// the marker should be set based on whichever is the last chunk/interval that overlaps with the query range
|
||||
for i, interv := range intervals {
|
||||
if overlapsClosedInterval(interv.mint, interv.maxt, tc.queryMinT, tc.queryMaxT) {
|
||||
lastChunk = interv
|
||||
lastChunkPos = i
|
||||
}
|
||||
}
|
||||
lastChunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(1, chunks.HeadChunkID(uint64(lastChunkPos))))
|
||||
|
||||
// define our expected chunks, by looking at the expected ChunkIntervals and setting...
|
||||
var expChunks []chunks.Meta
|
||||
for _, e := range tc.expChunks {
|
||||
|
@ -323,10 +311,6 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
|
|||
Chunk: chunkenc.Chunk(nil),
|
||||
MinTime: e.mint,
|
||||
MaxTime: e.maxt,
|
||||
// markers based on the last chunk we found above
|
||||
OOOLastMinTime: lastChunk.mint,
|
||||
OOOLastMaxTime: lastChunk.maxt,
|
||||
OOOLastRef: lastChunkRef,
|
||||
}
|
||||
|
||||
// Ref to whatever Ref the chunk has, that we refer to by ID
|
||||
|
@ -343,6 +327,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
|
|||
if headChunk && len(intervals) > 0 {
|
||||
// Put the last interval in the head chunk
|
||||
s1.ooo.oooHeadChunk = &oooHeadChunk{
|
||||
chunk: NewOOOChunk(),
|
||||
minTime: intervals[len(intervals)-1].mint,
|
||||
maxTime: intervals[len(intervals)-1].maxt,
|
||||
}
|
||||
|
@ -374,6 +359,15 @@ func TestOOOHeadIndexReader_Series(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
	for name, scenario := range sampleTypeScenarios {
		t.Run(name, func(t *testing.T) {
			testOOOHeadChunkReader_LabelValues(t, scenario)
		})
	}
}

//nolint:revive // unexported-return.
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenario) {
	chunkRange := int64(2000)
	head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true)
	t.Cleanup(func() { require.NoError(t, head.Close()) })
@ -383,15 +377,15 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
|
|||
app := head.Appender(context.Background())
|
||||
|
||||
// Add in-order samples
|
||||
_, err := app.Append(0, labels.FromStrings("foo", "bar1"), 100, 1)
|
||||
_, _, err := scenario.appendFunc(app, labels.FromStrings("foo", "bar1"), 100, int64(1))
|
||||
require.NoError(t, err)
|
||||
_, err = app.Append(0, labels.FromStrings("foo", "bar2"), 100, 2)
|
||||
_, _, err = scenario.appendFunc(app, labels.FromStrings("foo", "bar2"), 100, int64(2))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Add ooo samples for those series
|
||||
_, err = app.Append(0, labels.FromStrings("foo", "bar1"), 90, 1)
|
||||
_, _, err = scenario.appendFunc(app, labels.FromStrings("foo", "bar1"), 90, int64(1))
|
||||
require.NoError(t, err)
|
||||
_, err = app.Append(0, labels.FromStrings("foo", "bar2"), 90, 2)
|
||||
_, _, err = scenario.appendFunc(app, labels.FromStrings("foo", "bar2"), 90, int64(2))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
|
@ -468,6 +462,15 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
|
|||
// It does so by appending out of order samples to the db and then initializing
|
||||
// an OOOHeadChunkReader to read chunks from it.
|
||||
func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOHeadChunkReader_Chunk(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:revive // unexported-return.
|
||||
func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
|
||||
opts := DefaultOptions()
|
||||
opts.OutOfOrderCapMax = 5
|
||||
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
|
||||
|
@ -475,12 +478,6 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
s1 := labels.FromStrings("l", "v1")
|
||||
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
|
||||
|
||||
appendSample := func(app storage.Appender, l labels.Labels, timestamp int64, value float64) storage.SeriesRef {
|
||||
ref, err := app.Append(0, l, timestamp, value)
|
||||
require.NoError(t, err)
|
||||
return ref
|
||||
}
|
||||
|
||||
t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) {
|
||||
db := newTestDBWithOpts(t, opts)
|
||||
|
||||
|
@ -499,7 +496,7 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT int64
|
||||
queryMaxT int64
|
||||
firstInOrderSampleAt int64
|
||||
inputSamples chunks.SampleSlice
|
||||
inputSamples []testValue
|
||||
expChunkError bool
|
||||
expChunksSamples []chunks.SampleSlice
|
||||
}{
|
||||
|
@ -508,9 +505,9 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
sample{t: minutes(40), f: float64(0)},
|
||||
inputSamples: []testValue{
|
||||
{Ts: minutes(30), V: 0},
|
||||
{Ts: minutes(40), V: 0},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -519,8 +516,8 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [--------] (With 2 samples)
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
sample{t: minutes(40), f: float64(0)},
|
||||
scenario.sampleFunc(minutes(30), 0),
|
||||
scenario.sampleFunc(minutes(40), 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -529,19 +526,8 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
// opts.OOOCapMax is 5 so these will be mmapped to the first mmapped chunk
|
||||
sample{t: minutes(41), f: float64(0)},
|
||||
sample{t: minutes(42), f: float64(0)},
|
||||
sample{t: minutes(43), f: float64(0)},
|
||||
sample{t: minutes(44), f: float64(0)},
|
||||
sample{t: minutes(45), f: float64(0)},
|
||||
// The following samples will go to the head chunk, and we want it
|
||||
// to overlap with the previous chunk
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(50), f: float64(1)},
|
||||
},
|
||||
expChunkError: false,
|
||||
inputSamples: []testValue{{Ts: minutes(41), V: 0}, {Ts: minutes(42), V: 0}, {Ts: minutes(43), V: 0}, {Ts: minutes(44), V: 0}, {Ts: minutes(45), V: 0}, {Ts: minutes(30), V: 1}, {Ts: minutes(50), V: 1}},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
// Query Interval [------------------------------------------------------------------------------------------]
|
||||
// Chunk 0 [---] (With 5 samples)
|
||||
|
@ -549,13 +535,13 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [-----------------] (With 7 samples)
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(41), f: float64(0)},
|
||||
sample{t: minutes(42), f: float64(0)},
|
||||
sample{t: minutes(43), f: float64(0)},
|
||||
sample{t: minutes(44), f: float64(0)},
|
||||
sample{t: minutes(45), f: float64(0)},
|
||||
sample{t: minutes(50), f: float64(1)},
|
||||
scenario.sampleFunc(minutes(30), 1),
|
||||
scenario.sampleFunc(minutes(41), 0),
|
||||
scenario.sampleFunc(minutes(42), 0),
|
||||
scenario.sampleFunc(minutes(43), 0),
|
||||
scenario.sampleFunc(minutes(44), 0),
|
||||
scenario.sampleFunc(minutes(45), 0),
|
||||
scenario.sampleFunc(minutes(50), 1),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -564,28 +550,28 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
inputSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(12), f: float64(0)},
|
||||
sample{t: minutes(14), f: float64(0)},
|
||||
sample{t: minutes(16), f: float64(0)},
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
{Ts: minutes(10), V: 0},
|
||||
{Ts: minutes(12), V: 0},
|
||||
{Ts: minutes(14), V: 0},
|
||||
{Ts: minutes(16), V: 0},
|
||||
{Ts: minutes(20), V: 0},
|
||||
// Chunk 1
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(22), f: float64(1)},
|
||||
sample{t: minutes(24), f: float64(1)},
|
||||
sample{t: minutes(26), f: float64(1)},
|
||||
sample{t: minutes(29), f: float64(1)},
|
||||
// Chunk 2
|
||||
sample{t: minutes(30), f: float64(2)},
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(34), f: float64(2)},
|
||||
sample{t: minutes(36), f: float64(2)},
|
||||
sample{t: minutes(40), f: float64(2)},
|
||||
{Ts: minutes(20), V: 1},
|
||||
{Ts: minutes(22), V: 1},
|
||||
{Ts: minutes(24), V: 1},
|
||||
{Ts: minutes(26), V: 1},
|
||||
{Ts: minutes(29), V: 1},
|
||||
// Chunk 3
|
||||
{Ts: minutes(30), V: 2},
|
||||
{Ts: minutes(32), V: 2},
|
||||
{Ts: minutes(34), V: 2},
|
||||
{Ts: minutes(36), V: 2},
|
||||
{Ts: minutes(40), V: 2},
|
||||
// Head
|
||||
sample{t: minutes(40), f: float64(3)},
|
||||
sample{t: minutes(50), f: float64(3)},
|
||||
{Ts: minutes(40), V: 3},
|
||||
{Ts: minutes(50), V: 3},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -597,23 +583,23 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [----------------][-----------------]
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(12), f: float64(0)},
|
||||
sample{t: minutes(14), f: float64(0)},
|
||||
sample{t: minutes(16), f: float64(0)},
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(22), f: float64(1)},
|
||||
sample{t: minutes(24), f: float64(1)},
|
||||
sample{t: minutes(26), f: float64(1)},
|
||||
sample{t: minutes(29), f: float64(1)},
|
||||
scenario.sampleFunc(minutes(10), 0),
|
||||
scenario.sampleFunc(minutes(12), 0),
|
||||
scenario.sampleFunc(minutes(14), 0),
|
||||
scenario.sampleFunc(minutes(16), 0),
|
||||
scenario.sampleFunc(minutes(20), 1),
|
||||
scenario.sampleFunc(minutes(22), 1),
|
||||
scenario.sampleFunc(minutes(24), 1),
|
||||
scenario.sampleFunc(minutes(26), 1),
|
||||
scenario.sampleFunc(minutes(29), 1),
|
||||
},
|
||||
{
|
||||
sample{t: minutes(30), f: float64(2)},
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(34), f: float64(2)},
|
||||
sample{t: minutes(36), f: float64(2)},
|
||||
sample{t: minutes(40), f: float64(3)},
|
||||
sample{t: minutes(50), f: float64(3)},
|
||||
scenario.sampleFunc(minutes(30), 2),
|
||||
scenario.sampleFunc(minutes(32), 2),
|
||||
scenario.sampleFunc(minutes(34), 2),
|
||||
scenario.sampleFunc(minutes(36), 2),
|
||||
scenario.sampleFunc(minutes(40), 3),
|
||||
scenario.sampleFunc(minutes(50), 3),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -622,28 +608,28 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
inputSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(40), f: float64(0)},
|
||||
sample{t: minutes(42), f: float64(0)},
|
||||
sample{t: minutes(44), f: float64(0)},
|
||||
sample{t: minutes(46), f: float64(0)},
|
||||
sample{t: minutes(50), f: float64(0)},
|
||||
{Ts: minutes(40), V: 0},
|
||||
{Ts: minutes(42), V: 0},
|
||||
{Ts: minutes(44), V: 0},
|
||||
{Ts: minutes(46), V: 0},
|
||||
{Ts: minutes(50), V: 0},
|
||||
// Chunk 1
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(32), f: float64(1)},
|
||||
sample{t: minutes(34), f: float64(1)},
|
||||
sample{t: minutes(36), f: float64(1)},
|
||||
sample{t: minutes(40), f: float64(1)},
|
||||
// Chunk 2
|
||||
sample{t: minutes(20), f: float64(2)},
|
||||
sample{t: minutes(22), f: float64(2)},
|
||||
sample{t: minutes(24), f: float64(2)},
|
||||
sample{t: minutes(26), f: float64(2)},
|
||||
sample{t: minutes(29), f: float64(2)},
|
||||
{Ts: minutes(30), V: 1},
|
||||
{Ts: minutes(32), V: 1},
|
||||
{Ts: minutes(34), V: 1},
|
||||
{Ts: minutes(36), V: 1},
|
||||
{Ts: minutes(40), V: 1},
|
||||
// Chunk 3
|
||||
{Ts: minutes(20), V: 2},
|
||||
{Ts: minutes(22), V: 2},
|
||||
{Ts: minutes(24), V: 2},
|
||||
{Ts: minutes(26), V: 2},
|
||||
{Ts: minutes(29), V: 2},
|
||||
// Head
|
||||
sample{t: minutes(10), f: float64(3)},
|
||||
sample{t: minutes(20), f: float64(3)},
|
||||
{Ts: minutes(10), V: 3},
|
||||
{Ts: minutes(20), V: 3},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -655,23 +641,23 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [----------------][-----------------]
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(10), f: float64(3)},
|
||||
sample{t: minutes(20), f: float64(2)},
|
||||
sample{t: minutes(22), f: float64(2)},
|
||||
sample{t: minutes(24), f: float64(2)},
|
||||
sample{t: minutes(26), f: float64(2)},
|
||||
sample{t: minutes(29), f: float64(2)},
|
||||
scenario.sampleFunc(minutes(10), 3),
|
||||
scenario.sampleFunc(minutes(20), 2),
|
||||
scenario.sampleFunc(minutes(22), 2),
|
||||
scenario.sampleFunc(minutes(24), 2),
|
||||
scenario.sampleFunc(minutes(26), 2),
|
||||
scenario.sampleFunc(minutes(29), 2),
|
||||
},
|
||||
{
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(32), f: float64(1)},
|
||||
sample{t: minutes(34), f: float64(1)},
|
||||
sample{t: minutes(36), f: float64(1)},
|
||||
sample{t: minutes(40), f: float64(0)},
|
||||
sample{t: minutes(42), f: float64(0)},
|
||||
sample{t: minutes(44), f: float64(0)},
|
||||
sample{t: minutes(46), f: float64(0)},
|
||||
sample{t: minutes(50), f: float64(0)},
|
||||
scenario.sampleFunc(minutes(30), 1),
|
||||
scenario.sampleFunc(minutes(32), 1),
|
||||
scenario.sampleFunc(minutes(34), 1),
|
||||
scenario.sampleFunc(minutes(36), 1),
|
||||
scenario.sampleFunc(minutes(40), 0),
|
||||
scenario.sampleFunc(minutes(42), 0),
|
||||
scenario.sampleFunc(minutes(44), 0),
|
||||
scenario.sampleFunc(minutes(46), 0),
|
||||
scenario.sampleFunc(minutes(50), 0),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -680,28 +666,28 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
inputSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(12), f: float64(0)},
|
||||
sample{t: minutes(14), f: float64(0)},
|
||||
sample{t: minutes(16), f: float64(0)},
|
||||
sample{t: minutes(18), f: float64(0)},
|
||||
{Ts: minutes(10), V: 0},
|
||||
{Ts: minutes(12), V: 0},
|
||||
{Ts: minutes(14), V: 0},
|
||||
{Ts: minutes(16), V: 0},
|
||||
{Ts: minutes(18), V: 0},
|
||||
// Chunk 1
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(22), f: float64(1)},
|
||||
sample{t: minutes(24), f: float64(1)},
|
||||
sample{t: minutes(26), f: float64(1)},
|
||||
sample{t: minutes(28), f: float64(1)},
|
||||
// Chunk 2
|
||||
sample{t: minutes(30), f: float64(2)},
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(34), f: float64(2)},
|
||||
sample{t: minutes(36), f: float64(2)},
|
||||
sample{t: minutes(38), f: float64(2)},
|
||||
{Ts: minutes(20), V: 1},
|
||||
{Ts: minutes(22), V: 1},
|
||||
{Ts: minutes(24), V: 1},
|
||||
{Ts: minutes(26), V: 1},
|
||||
{Ts: minutes(28), V: 1},
|
||||
// Chunk 3
|
||||
{Ts: minutes(30), V: 2},
|
||||
{Ts: minutes(32), V: 2},
|
||||
{Ts: minutes(34), V: 2},
|
||||
{Ts: minutes(36), V: 2},
|
||||
{Ts: minutes(38), V: 2},
|
||||
// Head
|
||||
sample{t: minutes(40), f: float64(3)},
|
||||
sample{t: minutes(42), f: float64(3)},
|
||||
{Ts: minutes(40), V: 3},
|
||||
{Ts: minutes(42), V: 3},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -713,29 +699,29 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [-------][-------][-------][--------]
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(12), f: float64(0)},
|
||||
sample{t: minutes(14), f: float64(0)},
|
||||
sample{t: minutes(16), f: float64(0)},
|
||||
sample{t: minutes(18), f: float64(0)},
|
||||
scenario.sampleFunc(minutes(10), 0),
|
||||
scenario.sampleFunc(minutes(12), 0),
|
||||
scenario.sampleFunc(minutes(14), 0),
|
||||
scenario.sampleFunc(minutes(16), 0),
|
||||
scenario.sampleFunc(minutes(18), 0),
|
||||
},
|
||||
{
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(22), f: float64(1)},
|
||||
sample{t: minutes(24), f: float64(1)},
|
||||
sample{t: minutes(26), f: float64(1)},
|
||||
sample{t: minutes(28), f: float64(1)},
|
||||
scenario.sampleFunc(minutes(20), 1),
|
||||
scenario.sampleFunc(minutes(22), 1),
|
||||
scenario.sampleFunc(minutes(24), 1),
|
||||
scenario.sampleFunc(minutes(26), 1),
|
||||
scenario.sampleFunc(minutes(28), 1),
|
||||
},
|
||||
{
|
||||
sample{t: minutes(30), f: float64(2)},
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(34), f: float64(2)},
|
||||
sample{t: minutes(36), f: float64(2)},
|
||||
sample{t: minutes(38), f: float64(2)},
|
||||
scenario.sampleFunc(minutes(30), 2),
|
||||
scenario.sampleFunc(minutes(32), 2),
|
||||
scenario.sampleFunc(minutes(34), 2),
|
||||
scenario.sampleFunc(minutes(36), 2),
|
||||
scenario.sampleFunc(minutes(38), 2),
|
||||
},
|
||||
{
|
||||
sample{t: minutes(40), f: float64(3)},
|
||||
sample{t: minutes(42), f: float64(3)},
|
||||
scenario.sampleFunc(minutes(40), 3),
|
||||
scenario.sampleFunc(minutes(42), 3),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -744,22 +730,22 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
inputSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(15), f: float64(0)},
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
sample{t: minutes(25), f: float64(0)},
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
{Ts: minutes(10), V: 0},
|
||||
{Ts: minutes(15), V: 0},
|
||||
{Ts: minutes(20), V: 0},
|
||||
{Ts: minutes(25), V: 0},
|
||||
{Ts: minutes(30), V: 0},
|
||||
// Chunk 1
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
sample{t: minutes(42), f: float64(1)},
|
||||
{Ts: minutes(20), V: 1},
|
||||
{Ts: minutes(25), V: 1},
|
||||
{Ts: minutes(30), V: 1},
|
||||
{Ts: minutes(35), V: 1},
|
||||
{Ts: minutes(42), V: 1},
|
||||
// Chunk 2 Head
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(50), f: float64(2)},
|
||||
{Ts: minutes(32), V: 2},
|
||||
{Ts: minutes(50), V: 2},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -770,15 +756,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [-----------------------------------]
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(15), f: float64(0)},
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
sample{t: minutes(42), f: float64(1)},
|
||||
sample{t: minutes(50), f: float64(2)},
|
||||
scenario.sampleFunc(minutes(10), 0),
|
||||
scenario.sampleFunc(minutes(15), 0),
|
||||
scenario.sampleFunc(minutes(20), 1),
|
||||
scenario.sampleFunc(minutes(25), 1),
|
||||
scenario.sampleFunc(minutes(30), 1),
|
||||
scenario.sampleFunc(minutes(32), 2),
|
||||
scenario.sampleFunc(minutes(35), 1),
|
||||
scenario.sampleFunc(minutes(42), 1),
|
||||
scenario.sampleFunc(minutes(50), 2),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -787,22 +773,22 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
queryMinT: minutes(12),
|
||||
queryMaxT: minutes(33),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
inputSamples: chunks.SampleSlice{
|
||||
inputSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(15), f: float64(0)},
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
sample{t: minutes(25), f: float64(0)},
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
{Ts: minutes(10), V: 0},
|
||||
{Ts: minutes(15), V: 0},
|
||||
{Ts: minutes(20), V: 0},
|
||||
{Ts: minutes(25), V: 0},
|
||||
{Ts: minutes(30), V: 0},
|
||||
// Chunk 1
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
sample{t: minutes(42), f: float64(1)},
|
||||
{Ts: minutes(20), V: 1},
|
||||
{Ts: minutes(25), V: 1},
|
||||
{Ts: minutes(30), V: 1},
|
||||
{Ts: minutes(35), V: 1},
|
||||
{Ts: minutes(42), V: 1},
|
||||
// Chunk 2 Head
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(50), f: float64(2)},
|
||||
{Ts: minutes(32), V: 2},
|
||||
{Ts: minutes(50), V: 2},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -813,15 +799,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// Output Graphically [-----------------------------------]
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(10), f: float64(0)},
|
||||
sample{t: minutes(15), f: float64(0)},
|
||||
sample{t: minutes(20), f: float64(1)},
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(30), f: float64(1)},
|
||||
sample{t: minutes(32), f: float64(2)},
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
sample{t: minutes(42), f: float64(1)},
|
||||
sample{t: minutes(50), f: float64(2)},
|
||||
scenario.sampleFunc(minutes(10), 0),
|
||||
scenario.sampleFunc(minutes(15), 0),
|
||||
scenario.sampleFunc(minutes(20), 1),
|
||||
scenario.sampleFunc(minutes(25), 1),
|
||||
scenario.sampleFunc(minutes(30), 1),
|
||||
scenario.sampleFunc(minutes(32), 2),
|
||||
scenario.sampleFunc(minutes(35), 1),
|
||||
scenario.sampleFunc(minutes(42), 1),
|
||||
scenario.sampleFunc(minutes(50), 2),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -832,22 +818,24 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
db := newTestDBWithOpts(t, opts)
|
||||
|
||||
app := db.Appender(context.Background())
|
||||
s1Ref := appendSample(app, s1, tc.firstInOrderSampleAt, float64(tc.firstInOrderSampleAt/1*time.Minute.Milliseconds()))
|
||||
s1Ref, _, err := scenario.appendFunc(app, s1, tc.firstInOrderSampleAt, tc.firstInOrderSampleAt/1*time.Minute.Milliseconds())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// OOO few samples for s1.
|
||||
app = db.Appender(context.Background())
|
||||
for _, s := range tc.inputSamples {
|
||||
appendSample(app, s1, s.T(), s.F())
|
||||
_, _, err := scenario.appendFunc(app, s1, s.Ts, s.V)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// The Series method is the one that populates the chunk meta OOO
|
||||
// markers like OOOLastRef. These are then used by the ChunkReader.
|
||||
// The Series method populates the chunk metas, taking a copy of the
|
||||
// head OOO chunk if necessary. These are then used by the ChunkReader.
|
||||
ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
|
||||
var chks []chunks.Meta
|
||||
var b labels.ScratchBuilder
|
||||
err := ir.Series(s1Ref, &b, &chks)
|
||||
err = ir.Series(s1Ref, &b, &chks)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(tc.expChunksSamples), len(chks))
|
||||
|
||||
|
@ -858,13 +846,10 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Nil(t, c)
|
||||
|
||||
var resultSamples chunks.SampleSlice
|
||||
it := iterable.Iterator(nil)
|
||||
for it.Next() == chunkenc.ValFloat {
|
||||
t, v := it.At()
|
||||
resultSamples = append(resultSamples, sample{t: t, f: v})
|
||||
}
|
||||
require.Equal(t, tc.expChunksSamples[i], resultSamples)
|
||||
resultSamples, err := storage.ExpandSamples(it, nil)
|
||||
require.NoError(t, err)
|
||||
requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, true)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -879,6 +864,15 @@ func TestOOOHeadChunkReader_Chunk(t *testing.T) {
|
|||
// - Response B comes from : Series(), in parallel new samples added to the head, then Chunk()
|
||||
// - A == B
|
||||
func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(t *testing.T) {
|
||||
for name, scenario := range sampleTypeScenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(t, scenario)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//nolint:revive // unexported-return.
|
||||
func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(t *testing.T, scenario sampleTypeScenario) {
|
||||
opts := DefaultOptions()
|
||||
opts.OutOfOrderCapMax = 5
|
||||
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
|
||||
|
@ -886,19 +880,13 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
s1 := labels.FromStrings("l", "v1")
|
||||
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
|
||||
|
||||
appendSample := func(app storage.Appender, l labels.Labels, timestamp int64, value float64) storage.SeriesRef {
|
||||
ref, err := app.Append(0, l, timestamp, value)
|
||||
require.NoError(t, err)
|
||||
return ref
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
queryMinT int64
|
||||
queryMaxT int64
|
||||
firstInOrderSampleAt int64
|
||||
initialSamples chunks.SampleSlice
|
||||
samplesAfterSeriesCall chunks.SampleSlice
|
||||
initialSamples []testValue
|
||||
samplesAfterSeriesCall []testValue
|
||||
expChunkError bool
|
||||
expChunksSamples []chunks.SampleSlice
|
||||
}{
|
||||
|
@ -907,21 +895,21 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
initialSamples: chunks.SampleSlice{
|
||||
initialSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
sample{t: minutes(22), f: float64(0)},
|
||||
sample{t: minutes(24), f: float64(0)},
|
||||
sample{t: minutes(26), f: float64(0)},
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
{Ts: minutes(20), V: 0},
|
||||
{Ts: minutes(22), V: 0},
|
||||
{Ts: minutes(24), V: 0},
|
||||
{Ts: minutes(26), V: 0},
|
||||
{Ts: minutes(30), V: 0},
|
||||
// Chunk 1 Head
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
{Ts: minutes(25), V: 1},
|
||||
{Ts: minutes(35), V: 1},
|
||||
},
|
||||
samplesAfterSeriesCall: chunks.SampleSlice{
|
||||
sample{t: minutes(10), f: float64(1)},
|
||||
sample{t: minutes(32), f: float64(1)},
|
||||
sample{t: minutes(50), f: float64(1)},
|
||||
samplesAfterSeriesCall: []testValue{
|
||||
{Ts: minutes(10), V: 1},
|
||||
{Ts: minutes(32), V: 1},
|
||||
{Ts: minutes(50), V: 1},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -933,40 +921,39 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
// Output Graphically [------------] (With 8 samples, samples newer than lastmint or older than lastmaxt are omitted but the ones in between are kept)
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
sample{t: minutes(22), f: float64(0)},
|
||||
sample{t: minutes(24), f: float64(0)},
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(26), f: float64(0)},
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
sample{t: minutes(32), f: float64(1)}, // This sample was added after Series() but before Chunk() and its in between the lastmint and maxt so it should be kept
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
scenario.sampleFunc(minutes(20), 0),
|
||||
scenario.sampleFunc(minutes(22), 0),
|
||||
scenario.sampleFunc(minutes(24), 0),
|
||||
scenario.sampleFunc(minutes(25), 1),
|
||||
scenario.sampleFunc(minutes(26), 0),
|
||||
scenario.sampleFunc(minutes(30), 0),
|
||||
scenario.sampleFunc(minutes(35), 1),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "After Series() previous head gets mmapped after getting samples, new head gets new samples also overlapping, none of these should appear in the response.",
|
||||
name: "After Series() prev head gets mmapped after getting samples, new head gets new samples also overlapping, none of these should appear in response.",
|
||||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
initialSamples: chunks.SampleSlice{
|
||||
initialSamples: []testValue{
|
||||
// Chunk 0
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
sample{t: minutes(22), f: float64(0)},
|
||||
sample{t: minutes(24), f: float64(0)},
|
||||
sample{t: minutes(26), f: float64(0)},
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
{Ts: minutes(20), V: 0},
|
||||
{Ts: minutes(22), V: 0},
|
||||
{Ts: minutes(24), V: 0},
|
||||
{Ts: minutes(26), V: 0},
|
||||
{Ts: minutes(30), V: 0},
|
||||
// Chunk 1 Head
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
{Ts: minutes(25), V: 1},
|
||||
{Ts: minutes(35), V: 1},
|
||||
},
|
||||
samplesAfterSeriesCall: chunks.SampleSlice{
|
||||
sample{t: minutes(10), f: float64(1)},
|
||||
sample{t: minutes(32), f: float64(1)},
|
||||
sample{t: minutes(50), f: float64(1)},
|
||||
samplesAfterSeriesCall: []testValue{
|
||||
{Ts: minutes(10), V: 1},
|
||||
{Ts: minutes(32), V: 1},
|
||||
{Ts: minutes(50), V: 1},
|
||||
// Chunk 1 gets mmapped and Chunk 2, the new head is born
|
||||
sample{t: minutes(25), f: float64(2)},
|
||||
sample{t: minutes(31), f: float64(2)},
|
||||
{Ts: minutes(25), V: 2},
|
||||
{Ts: minutes(31), V: 2},
|
||||
},
|
||||
expChunkError: false,
|
||||
// ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100
|
||||
|
@ -979,14 +966,13 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
// Output Graphically [------------] (8 samples) It has 5 from Chunk 0 and 3 from Chunk 1
|
||||
expChunksSamples: []chunks.SampleSlice{
|
||||
{
|
||||
sample{t: minutes(20), f: float64(0)},
|
||||
sample{t: minutes(22), f: float64(0)},
|
||||
sample{t: minutes(24), f: float64(0)},
|
||||
sample{t: minutes(25), f: float64(1)},
|
||||
sample{t: minutes(26), f: float64(0)},
|
||||
sample{t: minutes(30), f: float64(0)},
|
||||
sample{t: minutes(32), f: float64(1)}, // This sample was added after Series() but before Chunk() and its in between the lastmint and maxt so it should be kept
|
||||
sample{t: minutes(35), f: float64(1)},
|
||||
scenario.sampleFunc(minutes(20), 0),
|
||||
scenario.sampleFunc(minutes(22), 0),
|
||||
scenario.sampleFunc(minutes(24), 0),
|
||||
scenario.sampleFunc(minutes(25), 1),
|
||||
scenario.sampleFunc(minutes(26), 0),
|
||||
scenario.sampleFunc(minutes(30), 0),
|
||||
scenario.sampleFunc(minutes(35), 1),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -997,22 +983,24 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
db := newTestDBWithOpts(t, opts)
|
||||
|
||||
app := db.Appender(context.Background())
|
||||
s1Ref := appendSample(app, s1, tc.firstInOrderSampleAt, float64(tc.firstInOrderSampleAt/1*time.Minute.Milliseconds()))
|
||||
s1Ref, _, err := scenario.appendFunc(app, s1, tc.firstInOrderSampleAt, tc.firstInOrderSampleAt/1*time.Minute.Milliseconds())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// OOO few samples for s1.
|
||||
app = db.Appender(context.Background())
|
||||
for _, s := range tc.initialSamples {
|
||||
appendSample(app, s1, s.T(), s.F())
|
||||
_, _, err := scenario.appendFunc(app, s1, s.Ts, s.V)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
// The Series method is the one that populates the chunk meta OOO
|
||||
// markers like OOOLastRef. These are then used by the ChunkReader.
|
||||
// The Series method populates the chunk metas, taking a copy of the
|
||||
// head OOO chunk if necessary. These are then used by the ChunkReader.
|
||||
ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0)
|
||||
var chks []chunks.Meta
|
||||
var b labels.ScratchBuilder
|
||||
err := ir.Series(s1Ref, &b, &chks)
|
||||
err = ir.Series(s1Ref, &b, &chks)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(tc.expChunksSamples), len(chks))
|
||||
|
||||
|
@ -1020,7 +1008,8 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
// OOO few samples for s1.
|
||||
app = db.Appender(context.Background())
|
||||
for _, s := range tc.samplesAfterSeriesCall {
|
||||
appendSample(app, s1, s.T(), s.F())
|
||||
_, _, err = scenario.appendFunc(app, s1, s.Ts, s.V)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
|
||||
|
@ -1031,13 +1020,10 @@ func TestOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
require.NoError(t, err)
|
||||
require.Nil(t, c)
|
||||
|
||||
var resultSamples chunks.SampleSlice
|
||||
it := iterable.Iterator(nil)
|
||||
for it.Next() == chunkenc.ValFloat {
|
||||
ts, v := it.At()
|
||||
resultSamples = append(resultSamples, sample{t: ts, f: v})
|
||||
}
|
||||
require.Equal(t, tc.expChunksSamples[i], resultSamples)
|
||||
resultSamples, err := storage.ExpandSamples(it, nil)
|
||||
require.NoError(t, err)
|
||||
requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, true)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
176
tsdb/testutil.go
Normal file
|
@ -0,0 +1,176 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
)
|
||||
|
||||
const (
	float = "float"
)

type testValue struct {
	Ts                 int64
	V                  int64
	CounterResetHeader histogram.CounterResetHint
}

type sampleTypeScenario struct {
	sampleType string
	appendFunc func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error)
	sampleFunc func(ts, value int64) sample
}

// TODO: native histogram sample types will be added as part of out-of-order native histogram support; see #11220.
var sampleTypeScenarios = map[string]sampleTypeScenario{
	float: {
		sampleType: sampleMetricTypeFloat,
		appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
			s := sample{t: ts, f: float64(value)}
			ref, err := appender.Append(0, lbls, ts, s.f)
			return ref, s, err
		},
		sampleFunc: func(ts, value int64) sample {
			return sample{t: ts, f: float64(value)}
		},
	},
// intHistogram: {
|
||||
// sampleType: sampleMetricTypeHistogram,
|
||||
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
// s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
|
||||
// ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
|
||||
// return ref, s, err
|
||||
// },
|
||||
// sampleFunc: func(ts, value int64) sample {
|
||||
// return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
|
||||
// },
|
||||
// },
|
||||
// floatHistogram: {
|
||||
// sampleType: sampleMetricTypeHistogram,
|
||||
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
// s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
|
||||
// ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
|
||||
// return ref, s, err
|
||||
// },
|
||||
// sampleFunc: func(ts, value int64) sample {
|
||||
// return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
|
||||
// },
|
||||
// },
|
||||
// gaugeIntHistogram: {
|
||||
// sampleType: sampleMetricTypeHistogram,
|
||||
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
// s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
|
||||
// ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
|
||||
// return ref, s, err
|
||||
// },
|
||||
// sampleFunc: func(ts, value int64) sample {
|
||||
// return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
|
||||
// },
|
||||
// },
|
||||
// gaugeFloatHistogram: {
|
||||
// sampleType: sampleMetricTypeHistogram,
|
||||
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
// s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
|
||||
// ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
|
||||
// return ref, s, err
|
||||
// },
|
||||
// sampleFunc: func(ts, value int64) sample {
|
||||
// return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
|
||||
// },
|
||||
// },
|
||||
}
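For orientation, a sketch of how the tests in this change consume a scenario (not part of the new file; t, appender and lbls are assumed to come from the surrounding test):

// Sketch only: append through the scenario and build the expected sample
// with the matching sampleFunc.
scenario := sampleTypeScenarios[float]
ref, s, err := scenario.appendFunc(appender, lbls, 1000, 42)
require.NoError(t, err)
_ = ref

expected := scenario.sampleFunc(1000, 42)
require.Equal(t, expected, s)
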
|
||||
|
||||
// requireEqualSeries checks that the actual series are equal to the expected ones. It ignores the counter reset hints for histograms.
func requireEqualSeries(t *testing.T, expected, actual map[string][]chunks.Sample, ignoreCounterResets bool) {
	for name, expectedItem := range expected {
		actualItem, ok := actual[name]
		require.True(t, ok, "Expected series %s not found", name)
		requireEqualSamples(t, name, expectedItem, actualItem, ignoreCounterResets)
	}
	for name := range actual {
		_, ok := expected[name]
		require.True(t, ok, "Unexpected series %s", name)
	}
}

func requireEqualOOOSamples(t *testing.T, expectedSamples int, db *DB) {
	require.Equal(t, float64(expectedSamples),
		prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat))+
			prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram)),
		"number of ooo appended samples mismatch")
}

func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sample, ignoreCounterResets bool) {
|
||||
require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name)
|
||||
for i, s := range expected {
|
||||
expectedSample := s
|
||||
actualSample := actual[i]
|
||||
require.Equal(t, expectedSample.T(), actualSample.T(), "Different timestamps for %s[%d]", name, i)
|
||||
require.Equal(t, expectedSample.Type().String(), actualSample.Type().String(), "Different types for %s[%d] at ts %d", name, i, expectedSample.T())
|
||||
switch {
|
||||
case s.H() != nil:
|
||||
{
|
||||
expectedHist := expectedSample.H()
|
||||
actualHist := actualSample.H()
|
||||
if ignoreCounterResets && expectedHist.CounterResetHint != histogram.GaugeType {
|
||||
expectedHist.CounterResetHint = histogram.UnknownCounterReset
|
||||
actualHist.CounterResetHint = histogram.UnknownCounterReset
|
||||
} else {
|
||||
require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint))
|
||||
}
|
||||
require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
|
||||
}
|
||||
case s.FH() != nil:
|
||||
{
|
||||
expectedHist := expectedSample.FH()
|
||||
actualHist := actualSample.FH()
|
||||
if ignoreCounterResets {
|
||||
expectedHist.CounterResetHint = histogram.UnknownCounterReset
|
||||
actualHist.CounterResetHint = histogram.UnknownCounterReset
|
||||
} else {
|
||||
require.Equal(t, expectedHist.CounterResetHint, actualHist.CounterResetHint, "Sample header doesn't match for %s[%d] at ts %d, expected: %s, actual: %s", name, i, expectedSample.T(), counterResetAsString(expectedHist.CounterResetHint), counterResetAsString(actualHist.CounterResetHint))
|
||||
}
|
||||
require.Equal(t, expectedHist, actualHist, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
|
||||
}
|
||||
default:
|
||||
expectedFloat := expectedSample.F()
|
||||
actualFloat := actualSample.F()
|
||||
require.Equal(t, expectedFloat, actualFloat, "Sample doesn't match for %s[%d] at ts %d", name, i, expectedSample.T())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func counterResetAsString(h histogram.CounterResetHint) string {
|
||||
switch h {
|
||||
case histogram.UnknownCounterReset:
|
||||
return "UnknownCounterReset"
|
||||
case histogram.CounterReset:
|
||||
return "CounterReset"
|
||||
case histogram.NotCounterReset:
|
||||
return "NotCounterReset"
|
||||
case histogram.GaugeType:
|
||||
return "GaugeType"
|
||||
}
|
||||
panic("Unexpected counter reset type")
|
||||
}
|
|
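The comparison helpers above are generic over chunks.Sample, so a test can feed them any queried result map. A minimal sketch of a call site (the series name, timestamps, and the package-local sample type are illustrative assumptions, not part of this change):

func TestRequireEqualSeriesSketch(t *testing.T) {
	// Two identical single-series result sets; counter reset hints are ignored.
	expected := map[string][]chunks.Sample{
		`{foo="bar"}`: {sample{t: 1000, f: 1}, sample{t: 2000, f: 2}},
	}
	actual := map[string][]chunks.Sample{
		`{foo="bar"}`: {sample{t: 1000, f: 1}, sample{t: 2000, f: 2}},
	}
	requireEqualSeries(t, expected, actual, true)
}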
@ -57,6 +57,7 @@ type WriteTo interface {
AppendHistograms([]record.RefHistogramSample) bool
AppendFloatHistograms([]record.RefFloatHistogramSample) bool
StoreSeries([]record.RefSeries, int)
StoreMetadata([]record.RefMetadata)

// Next two methods are intended for garbage-collection: first we call
// UpdateSeriesSegment on all current series

@ -88,6 +89,7 @@ type Watcher struct {
lastCheckpoint string
sendExemplars bool
sendHistograms bool
sendMetadata bool
metrics *WatcherMetrics
readerMetrics *LiveReaderMetrics

@ -170,7 +172,7 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
}

// NewWatcher creates a new WAL watcher for a given WriteTo.
func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms bool) *Watcher {
func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher {
if logger == nil {
logger = log.NewNopLogger()
}

@ -183,6 +185,7 @@ func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logge
name: name,
sendExemplars: sendExemplars,
sendHistograms: sendHistograms,
sendMetadata: sendMetadata,

readNotify: make(chan struct{}),
quit: make(chan struct{}),

@ -541,6 +544,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
histogramsToSend []record.RefHistogramSample
floatHistograms []record.RefFloatHistogramSample
floatHistogramsToSend []record.RefFloatHistogramSample
metadata []record.RefMetadata
)
for r.Next() && !isClosed(w.quit) {
rec := r.Record()

@ -652,6 +656,17 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
w.writer.AppendFloatHistograms(floatHistogramsToSend)
floatHistogramsToSend = floatHistogramsToSend[:0]
}

case record.Metadata:
if !w.sendMetadata || !tail {
break
}
meta, err := dec.Metadata(rec, metadata[:0])
if err != nil {
w.recordDecodeFailsMetric.Inc()
return err
}
w.writer.StoreMetadata(meta)
case record.Tombstones:

default:
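To show what the new StoreMetadata hook implies for WriteTo implementers, here is a minimal sketch of a receiver that only counts metadata records; the full method set is assumed from the interface above, and the commented constructor call illustrates the added sendMetadata argument:

// metadataCounter is an illustrative WriteTo that counts metadata records and ignores the rest.
type metadataCounter struct{ n int }

func (m *metadataCounter) Append([]record.RefSample) bool                              { return true }
func (m *metadataCounter) AppendExemplars([]record.RefExemplar) bool                   { return true }
func (m *metadataCounter) AppendHistograms([]record.RefHistogramSample) bool           { return true }
func (m *metadataCounter) AppendFloatHistograms([]record.RefFloatHistogramSample) bool { return true }
func (m *metadataCounter) StoreSeries([]record.RefSeries, int)                         {}
func (m *metadataCounter) UpdateSeriesSegment([]record.RefSeries, int)                 {}
func (m *metadataCounter) SeriesReset(int)                                             {}
func (m *metadataCounter) StoreMetadata(md []record.RefMetadata)                       { m.n += len(md) }

// watcher := NewWatcher(metrics, readerMetrics, logger, "sketch", &metadataCounter{}, dir,
//     true /* exemplars */, true /* histograms */, true /* metadata */)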
@ -92,6 +92,8 @@ func (wtm *writeToMock) StoreSeries(series []record.RefSeries, index int) {
wtm.UpdateSeriesSegment(series, index)
}

func (wtm *writeToMock) StoreMetadata(_ []record.RefMetadata) { /* no-op */ }

func (wtm *writeToMock) UpdateSeriesSegment(series []record.RefSeries, index int) {
wtm.seriesLock.Lock()
defer wtm.seriesLock.Unlock()

@ -219,7 +221,7 @@ func TestTailSamples(t *testing.T) {
require.NoError(t, err)

wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, true, true, true)
watcher.SetStartTime(now)

// Set the Watcher's metrics so they're not nil pointers.

@ -304,7 +306,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
require.NoError(t, err)

wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()

expected := seriesCount

@ -393,7 +395,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
require.NoError(t, err)
readTimeout = time.Second
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()

expected := seriesCount * 2

@ -464,7 +466,7 @@ func TestReadCheckpoint(t *testing.T) {
require.NoError(t, err)

wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()

expectedSeries := seriesCount

@ -533,7 +535,7 @@ func TestReadCheckpointMultipleSegments(t *testing.T) {
}

wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1

// Set the Watcher's metrics so they're not nil pointers.

@ -606,7 +608,7 @@ func TestCheckpointSeriesReset(t *testing.T) {

readTimeout = time.Second
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1
go watcher.Start()

@ -685,7 +687,7 @@ func TestRun_StartupTime(t *testing.T) {
require.NoError(t, w.Close())

wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segments

watcher.setMetrics()

@ -774,7 +776,7 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
}()

wt := newWriteToMock(time.Millisecond)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segments

watcher.setMetrics()
@ -71,31 +71,49 @@ func (a Annotations) AsErrors() []error {
return arr
}

// AsStrings is a convenience function to return the annotations map as a slice
// of strings. The query string is used to get the line number and character offset
// positioning info of the elements which trigger an annotation. We limit the number
// of annotations returned here with maxAnnos (0 for no limit).
func (a Annotations) AsStrings(query string, maxAnnos int) []string {
arr := make([]string, 0, len(a))
// AsStrings is a convenience function to return the annotations map as 2 slices
// of strings, separated into warnings and infos. The query string is used to get the
// line number and character offset positioning info of the elements which trigger an
// annotation. We limit the number of warnings and infos returned here with maxWarnings
// and maxInfos respectively (0 for no limit).
func (a Annotations) AsStrings(query string, maxWarnings, maxInfos int) (warnings, infos []string) {
warnings = make([]string, 0, maxWarnings+1)
infos = make([]string, 0, maxInfos+1)
warnSkipped := 0
infoSkipped := 0
for _, err := range a {
if maxAnnos > 0 && len(arr) >= maxAnnos {
break
}
var anErr annoErr
if errors.As(err, &anErr) {
anErr.Query = query
err = anErr
}
arr = append(arr, err.Error())
switch {
case errors.Is(err, PromQLInfo):
if maxInfos == 0 || len(infos) < maxInfos {
infos = append(infos, err.Error())
} else {
infoSkipped++
}
default:
if maxWarnings == 0 || len(warnings) < maxWarnings {
warnings = append(warnings, err.Error())
} else {
warnSkipped++
}
}
}
if maxAnnos > 0 && len(a) > maxAnnos {
arr = append(arr, fmt.Sprintf("%d more annotations omitted", len(a)-maxAnnos))
if warnSkipped > 0 {
warnings = append(warnings, fmt.Sprintf("%d more warning annotations omitted", warnSkipped))
}
return arr
if infoSkipped > 0 {
infos = append(infos, fmt.Sprintf("%d more info annotations omitted", infoSkipped))
}
return
}

func (a Annotations) CountWarningsAndInfo() (int, int) {
var countWarnings, countInfo int
// CountWarningsAndInfo counts and returns the number of warnings and infos in the
// annotations wrapper.
func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
for _, err := range a {
if errors.Is(err, PromQLWarning) {
countWarnings++

@ -104,7 +122,7 @@ func (a Annotations) CountWarningsAndInfo() (int, int) {
countInfo++
}
}
return countWarnings, countInfo
return
}

//nolint:revive // error-naming.

@ -116,6 +134,7 @@ var (
PromQLInfo = errors.New("PromQL info")
PromQLWarning = errors.New("PromQL warning")

InvalidRatioWarning = fmt.Errorf("%w: ratio value should be between -1 and 1", PromQLWarning)
InvalidQuantileWarning = fmt.Errorf("%w: quantile value should be between 0 and 1", PromQLWarning)
BadBucketLabelWarning = fmt.Errorf("%w: bucket label %q is missing or has a malformed value", PromQLWarning, model.BucketLabel)
MixedFloatsHistogramsWarning = fmt.Errorf("%w: encountered a mix of histograms and floats for", PromQLWarning)

@ -155,6 +174,15 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error {
}
}

// NewInvalidRatioWarning is used when the user specifies an invalid ratio
// value, i.e. a float that is outside the range [-1, 1] or NaN.
func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w, got %g, capping to %g", InvalidRatioWarning, q, to),
}
}

// NewBadBucketLabelWarning is used when there is an error parsing the bucket label
// of a classic histogram.
func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error {
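For callers of the reworked AsStrings above, a short usage sketch (NewInvalidQuantileWarning appears in this diff; NewPossibleNonCounterInfo is assumed to exist in the same package; the query text and limits are illustrative):

var annos annotations.Annotations
annos = annos.Add(annotations.NewInvalidQuantileWarning(1.2, posrange.PositionRange{}))
annos = annos.Add(annotations.NewPossibleNonCounterInfo("http_requests", posrange.PositionRange{}))

// Warnings and infos now come back as separate slices, each capped at 10 entries
// plus an "N more ... omitted" marker when the cap is exceeded.
warn, info := annos.AsStrings(`quantile(1.2, rate(http_requests[5m]))`, 10, 10)
fmt.Println(warn, info)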
@ -159,6 +159,7 @@ type Response struct {
ErrorType errorType `json:"errorType,omitempty"`
Error string `json:"error,omitempty"`
Warnings []string `json:"warnings,omitempty"`
Infos []string `json:"infos,omitempty"`
}

type apiFuncResult struct {

@ -248,6 +249,7 @@ func NewAPI(
registerer prometheus.Registerer,
statsRenderer StatsRenderer,
rwEnabled bool,
acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
otlpEnabled bool,
) *API {
a := &API{

@ -290,7 +292,7 @@ func NewAPI(
}

if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap)
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap)

@ -1746,11 +1748,13 @@ func (api *API) cleanTombstones(*http.Request) apiFuncResult {
// can be empty if the position information isn't needed.
func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface{}, warnings annotations.Annotations, query string) {
statusMessage := statusSuccess
warn, info := warnings.AsStrings(query, 10, 10)

resp := &Response{
Status: statusMessage,
Data: data,
Warnings: warnings.AsStrings(query, 10),
Warnings: warn,
Infos: info,
}

codec, err := api.negotiateCodec(req, resp)
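For illustration only, the wire shape of a successful response once infos are populated might look like this; the anonymous struct simply mirrors the Response fields above with made-up values:

payload := struct {
	Status   string      `json:"status"`
	Data     interface{} `json:"data,omitempty"`
	Warnings []string    `json:"warnings,omitempty"`
	Infos    []string    `json:"infos,omitempty"`
}{
	Status:   "success",
	Data:     map[string]interface{}{"resultType": "vector", "result": []interface{}{}},
	Warnings: []string{"example warning annotation"},
	Infos:    []string{"example info annotation"},
}
b, _ := json.Marshal(payload)
fmt.Println(string(b))
// {"status":"success","data":{"result":[],"resultType":"vector"},"warnings":["example warning annotation"],"infos":["example info annotation"]}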
@ -455,7 +455,7 @@ func TestEndpoints(t *testing.T) {

remote := remote.NewStorage(promlog.New(&promlogConfig), prometheus.DefaultRegisterer, func() (int64, error) {
return 0, nil
}, dbDir, 1*time.Second, nil)
}, dbDir, 1*time.Second, nil, false)

err = remote.ApplyConfig(&config.Config{
RemoteReadConfigs: []*config.RemoteReadConfig{

@ -135,6 +135,7 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router {
nil,
nil,
false,
config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2},
false,
)
@ -66,6 +66,7 @@ interface APIResponse<T> {
data?: T;
error?: string;
warnings?: string[];
infos?: string[];
}

// These are status codes where the Prometheus API still returns a valid JSON body,

@ -544,6 +544,18 @@ export const aggregateOpTerms = [
info: 'Group series, while setting the sample value to 1',
type: 'keyword',
},
{
label: 'limitk',
detail: 'aggregation',
info: 'Sample k elements',
type: 'keyword',
},
{
label: 'limit_ratio',
detail: 'aggregation',
info: 'Sample given ratio of elements',
type: 'keyword',
},
{
label: 'max',
detail: 'aggregation',
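To make the new operators concrete, here is a sketch of the corresponding PromQL expressions run through the upstream Go parser; whether they are accepted can depend on experimental-function gating in the running Prometheus version, so the error path is handled rather than assumed away:

for _, q := range []string{`limitk(3, up)`, `limit_ratio(0.5, http_requests_total)`} {
	expr, err := parser.ParseExpr(q)
	if err != nil {
		fmt.Println(q, "rejected:", err) // e.g. when experimental aggregations are disabled
		continue
	}
	fmt.Printf("%s parses as %T\n", q, expr) // expected: *parser.AggregateExpr
}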
@ -28,6 +28,8 @@ import {
Gtr,
Identifier,
LabelMatchers,
LimitK,
LimitRatio,
Lss,
Lte,
MatrixSelector,

@ -167,7 +169,13 @@ export class Parser {
}
this.expectType(params[params.length - 1], ValueType.vector, 'aggregation expression');
// get the parameter of the aggregation operator
if (aggregateOp.type.id === Topk || aggregateOp.type.id === Bottomk || aggregateOp.type.id === Quantile) {
if (
aggregateOp.type.id === Topk ||
aggregateOp.type.id === Bottomk ||
aggregateOp.type.id === LimitK ||
aggregateOp.type.id === LimitRatio ||
aggregateOp.type.id === Quantile
) {
this.expectType(params[0], ValueType.scalar, 'aggregation parameter');
}
if (aggregateOp.type.id === CountValues) {

@ -22,7 +22,7 @@ export const promQLHighLight = styleTags({
Identifier: tags.variableName,
'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramAvg HistogramCount HistogramFraction HistogramQuantile HistogramSum HoltWinters Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year':
tags.function(tags.variableName),
'Avg Bottomk Count Count_values Group Max Min Quantile Stddev Stdvar Sum Topk': tags.operatorKeyword,
'Avg Bottomk Count Count_values Group LimitK LimitRatio Max Min Quantile Stddev Stdvar Sum Topk': tags.operatorKeyword,
'By Without Bool On Ignoring GroupLeft GroupRight Offset Start End': tags.modifier,
'And Unless Or': tags.logicOperator,
'Sub Add Mul Mod Div Atan2 Eql Neq Lte Lss Gte Gtr EqlRegex EqlSingle NeqRegex Pow At': tags.operator,

@ -54,6 +54,8 @@ AggregateOp {
Max |
Min |
Quantile |
LimitK |
LimitRatio |
Stddev |
Stdvar |
Sum |

@ -330,6 +332,8 @@ NumberLiteral {
Max,
Min,
Quantile,
LimitK,
LimitRatio,
Stddev,
Stdvar,
Sum,

@ -33,6 +33,8 @@ import {
On,
Or,
Quantile,
LimitK,
LimitRatio,
Start,
Stddev,
Stdvar,

@ -67,6 +69,8 @@ const contextualKeywordTokens = {
max: Max,
min: Min,
quantile: Quantile,
limitk: LimitK,
limit_ratio: LimitRatio,
stddev: Stddev,
stdvar: Stdvar,
sum: Sum,
@ -2,22 +2,93 @@ import React, { FC } from 'react';
import { UncontrolledTooltip } from 'reactstrap';
import { Histogram } from '../../types/types';
import { bucketRangeString } from './DataTable';
import {
calculateDefaultExpBucketWidth,
findMinPositive,
findMaxNegative,
findZeroAxisLeft,
showZeroAxis,
findZeroBucket,
ScaleType,
} from './HistogramHelpers';

type ScaleType = 'linear' | 'exponential';
interface HistogramChartProps {
histogram: Histogram;
index: number;
scale: ScaleType;
}

const HistogramChart: FC<{ histogram: Histogram; index: number; scale: ScaleType }> = ({ index, histogram, scale }) => {
const HistogramChart: FC<HistogramChartProps> = ({ index, histogram, scale }) => {
const { buckets } = histogram;
const rangeMax = buckets ? parseFloat(buckets[buckets.length - 1][2]) : 0;
const countMax = buckets ? buckets.map((b) => parseFloat(b[3])).reduce((a, b) => Math.max(a, b)) : 0;
if (!buckets || buckets.length === 0) {
return <div>No data</div>;
}
const formatter = Intl.NumberFormat('en', { notation: 'compact' });
const positiveBuckets = buckets?.filter((b) => parseFloat(b[1]) >= 0); // we only want to show buckets with range >= 0
const xLabelTicks = scale === 'linear' ? [0.25, 0.5, 0.75, 1] : [1];

// For linear scales, the count of a histogram bucket is represented by its area rather than its height. This means it considers
// both the count and the range (width) of the bucket. For this, we can set the height of the bucket proportional
// to its frequency density (fd). The fd is the count of the bucket divided by the width of the bucket.
const fds = [];
for (const bucket of buckets) {
const left = parseFloat(bucket[1]);
const right = parseFloat(bucket[2]);
const count = parseFloat(bucket[3]);
const width = right - left;

// This happens when a user wants observations of precisely zero to be included in the zero bucket
if (width === 0) {
fds.push(0);
continue;
}
fds.push(count / width);
}
const fdMax = Math.max(...fds);
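// Worked example (illustrative, not part of the change itself): a bucket [0, 2) holding 4
// observations has fd = 4 / 2 = 2, while a wider bucket [2, 10) holding the same 4
// observations has fd = 4 / 8 = 0.5, so the first bar is drawn four times taller even
// though both buckets contain equal counts.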
const first = buckets[0];
const last = buckets[buckets.length - 1];

const rangeMax = parseFloat(last[2]);
const rangeMin = parseFloat(first[1]);
const countMax = Math.max(...buckets.map((b) => parseFloat(b[3])));

const defaultExpBucketWidth = calculateDefaultExpBucketWidth(last, buckets);

const maxPositive = rangeMax > 0 ? rangeMax : 0;
const minPositive = findMinPositive(buckets);
const maxNegative = findMaxNegative(buckets);
const minNegative = parseFloat(first[1]) < 0 ? parseFloat(first[1]) : 0;

// Calculate the borders of positive and negative buckets in the exponential scale from left to right
const startNegative = minNegative !== 0 ? -Math.log(Math.abs(minNegative)) : 0;
const endNegative = maxNegative !== 0 ? -Math.log(Math.abs(maxNegative)) : 0;
const startPositive = minPositive !== 0 ? Math.log(minPositive) : 0;
const endPositive = maxPositive !== 0 ? Math.log(maxPositive) : 0;

// Calculate the width of negative, positive, and all exponential bucket ranges on the x-axis
const xWidthNegative = endNegative - startNegative;
const xWidthPositive = endPositive - startPositive;
const xWidthTotal = xWidthNegative + defaultExpBucketWidth + xWidthPositive;

const zeroBucketIdx = findZeroBucket(buckets);
const zeroAxisLeft = findZeroAxisLeft(
scale,
rangeMin,
rangeMax,
minPositive,
maxNegative,
zeroBucketIdx,
xWidthNegative,
xWidthTotal,
defaultExpBucketWidth
);
const zeroAxis = showZeroAxis(zeroAxisLeft);

return (
<div className="histogram-y-wrapper">
<div className="histogram-y-labels">
{[1, 0.75, 0.5, 0.25].map((i) => (
<div key={i} className="histogram-y-label">
{formatter.format(countMax * i)}
{scale === 'linear' ? '' : formatter.format(countMax * i)}
</div>
))}
<div key={0} className="histogram-y-label" style={{ height: 0 }}>
@ -31,62 +102,175 @@ const HistogramChart: FC<{ histogram: Histogram; index: number; scale: ScaleType
<div className="histogram-y-grid" style={{ bottom: i * 100 + '%' }}></div>
<div className="histogram-y-tick" style={{ bottom: i * 100 + '%' }}></div>
<div className="histogram-x-grid" style={{ left: i * 100 + '%' }}></div>
<div className="histogram-x-tick" style={{ left: i * 100 + '%' }}></div>
</React.Fragment>
))}
{positiveBuckets?.map((b, bIdx) => {
const bucketIdx = `bucket-${index}-${bIdx}-${Math.ceil(parseFloat(b[3]) * 100)}`;
const bucketLeft =
scale === 'linear' ? (parseFloat(b[1]) / rangeMax) * 100 + '%' : (bIdx / positiveBuckets.length) * 100 + '%';
const bucketWidth =
scale === 'linear'
? ((parseFloat(b[2]) - parseFloat(b[1])) / rangeMax) * 100 + '%'
: 100 / positiveBuckets.length + '%';
return (
<React.Fragment key={bIdx}>
<div
id={bucketIdx}
className="histogram-bucket-slot"
style={{
left: bucketLeft,
width: bucketWidth,
}}
>
<div
id={bucketIdx}
className="histogram-bucket"
style={{
height: (parseFloat(b[3]) / countMax) * 100 + '%',
}}
></div>
<UncontrolledTooltip
style={{ maxWidth: 'unset', padding: 10, textAlign: 'left' }}
placement="bottom"
target={bucketIdx}
>
<strong>range:</strong> {bucketRangeString(b)}
<br />
<strong>count:</strong> {b[3]}
</UncontrolledTooltip>
</div>
</React.Fragment>
);
})}
<div className="histogram-x-tick" style={{ left: '0%' }}></div>
<div className="histogram-x-tick" style={{ left: zeroAxisLeft }}></div>
<div className="histogram-x-grid" style={{ left: zeroAxisLeft }}></div>
<div className="histogram-x-tick" style={{ left: '100%' }}></div>

<RenderHistogramBars
buckets={buckets}
scale={scale}
rangeMin={rangeMin}
rangeMax={rangeMax}
index={index}
fds={fds}
fdMax={fdMax}
countMax={countMax}
defaultExpBucketWidth={defaultExpBucketWidth}
minPositive={minPositive}
maxNegative={maxNegative}
startPositive={startPositive}
startNegative={startNegative}
xWidthPositive={xWidthPositive}
xWidthNegative={xWidthNegative}
xWidthTotal={xWidthTotal}
/>

<div className="histogram-axes"></div>
</div>
<div className="histogram-x-labels">
<div key={0} className="histogram-x-label" style={{ width: 0 }}>
0
<div className="histogram-x-label">
<React.Fragment>
<div style={{ position: 'absolute', left: 0 }}>{formatter.format(rangeMin)}</div>
{rangeMin < 0 && zeroAxis && <div style={{ position: 'absolute', left: zeroAxisLeft }}>0</div>}
<div style={{ position: 'absolute', right: 0 }}>{formatter.format(rangeMax)}</div>
</React.Fragment>
</div>
{xLabelTicks.map((i) => (
<div key={i} className="histogram-x-label">
<div style={{ position: 'absolute', right: i === 1 ? 0 : -18 }}>{formatter.format(rangeMax * i)}</div>
</div>
))}
</div>
</div>
</div>
);
};

interface RenderHistogramProps {
buckets: [number, string, string, string][];
scale: ScaleType;
rangeMin: number;
rangeMax: number;
index: number;
fds: number[];
fdMax: number;
countMax: number;
defaultExpBucketWidth: number;
minPositive: number;
maxNegative: number;
startPositive: number;
startNegative: number;
xWidthNegative: number;
xWidthPositive: number;
xWidthTotal: number;
}
const RenderHistogramBars: FC<RenderHistogramProps> = ({
buckets,
scale,
rangeMin,
rangeMax,
index,
fds,
fdMax,
countMax,
defaultExpBucketWidth,
minPositive,
maxNegative,
startPositive,
startNegative,
xWidthNegative,
xWidthPositive,
xWidthTotal,
}) => {
return (
<React.Fragment>
{buckets.map((b, bIdx) => {
const left = parseFloat(b[1]);
const right = parseFloat(b[2]);
const count = parseFloat(b[3]);
const bucketIdx = `bucket-${index}-${bIdx}-${Math.ceil(parseFloat(b[3]) * 100)}`;

const logWidth = Math.abs(Math.log(Math.abs(right)) - Math.log(Math.abs(left)));
const expBucketWidth = logWidth === 0 ? defaultExpBucketWidth : logWidth;

let bucketWidth = '';
let bucketLeft = '';
let bucketHeight = '';

switch (scale) {
case 'linear':
bucketWidth = ((right - left) / (rangeMax - rangeMin)) * 100 + '%';
bucketLeft = ((left - rangeMin) / (rangeMax - rangeMin)) * 100 + '%';
if (left === 0 && right === 0) {
bucketLeft = '0%'; // do not render zero-width zero bucket
bucketWidth = '0%';
}
bucketHeight = (fds[bIdx] / fdMax) * 100 + '%';
break;
case 'exponential':
let adjust = 0; // if buckets are all positive/negative, we need to remove the width of the zero bucket
if (minPositive === 0 || maxNegative === 0) {
adjust = defaultExpBucketWidth;
}
bucketWidth = (expBucketWidth / (xWidthTotal - adjust)) * 100 + '%';
if (left < 0) {
// negative buckets boundary
bucketLeft = (-(Math.log(Math.abs(left)) + startNegative) / (xWidthTotal - adjust)) * 100 + '%';
} else {
// positive buckets boundary
bucketLeft =
((Math.log(left) - startPositive + defaultExpBucketWidth + xWidthNegative - adjust) /
(xWidthTotal - adjust)) *
100 +
'%';
}
if (left < 0 && right > 0) {
// if the bucket crosses the zero axis
bucketLeft = (xWidthNegative / xWidthTotal) * 100 + '%';
}
if (left === 0 && right === 0) {
// do not render zero width zero bucket
bucketLeft = '0%';
bucketWidth = '0%';
}

bucketHeight = (count / countMax) * 100 + '%';
break;
default:
throw new Error('Invalid scale');
}

return (
<React.Fragment key={bIdx}>
<div
id={bucketIdx}
className="histogram-bucket-slot"
style={{
left: bucketLeft,
width: bucketWidth,
}}
>
<div
id={bucketIdx}
className="histogram-bucket"
style={{
height: bucketHeight,
}}
></div>
<UncontrolledTooltip
style={{ maxWidth: 'unset', padding: 10, textAlign: 'left' }}
placement="bottom"
target={bucketIdx}
>
<strong>range:</strong> {bucketRangeString(b)}
<br />
<strong>count:</strong> {count}
</UncontrolledTooltip>
</div>
</React.Fragment>
);
})}
</React.Fragment>
);
};

export default HistogramChart;
126 web/ui/react-app/src/pages/graph/HistogramHelpers.ts Normal file

@ -0,0 +1,126 @@
export type ScaleType = 'linear' | 'exponential';

// Calculates a default width of exponential histogram bucket ranges. If the last bucket is [0, 0],
// the width is calculated using the second to last bucket. Throws an error if the only bucket is [-0, 0].
export function calculateDefaultExpBucketWidth(
last: [number, string, string, string],
buckets: [number, string, string, string][]
): number {
if (parseFloat(last[2]) === 0 || parseFloat(last[1]) === 0) {
if (buckets.length > 1) {
return Math.abs(
Math.log(Math.abs(parseFloat(buckets[buckets.length - 2][2]))) -
Math.log(Math.abs(parseFloat(buckets[buckets.length - 2][1])))
);
} else {
throw new Error('Only one bucket in histogram ([-0, 0]). Cannot calculate defaultExpBucketWidth.');
}
} else {
return Math.abs(Math.log(Math.abs(parseFloat(last[2]))) - Math.log(Math.abs(parseFloat(last[1]))));
}
}

// Finds the lowest positive value from the bucket ranges
// Returns 0 if no positive values are found or if there are no buckets.
export function findMinPositive(buckets: [number, string, string, string][]) {
if (!buckets || buckets.length === 0) {
return 0; // no buckets
}
for (let i = 0; i < buckets.length; i++) {
const right = parseFloat(buckets[i][2]);
const left = parseFloat(buckets[i][1]);

if (left > 0) {
return left;
}
if (left < 0 && right > 0) {
return right;
}
if (i === buckets.length - 1) {
if (right > 0) {
return right;
}
}
}
return 0; // all buckets are negative
}

// Finds the largest negative value (the negative bound closest to zero) from the bucket ranges
// Returns 0 if no negative values are found or if there are no buckets.
export function findMaxNegative(buckets: [number, string, string, string][]) {
if (!buckets || buckets.length === 0) {
return 0; // no buckets
}
for (let i = 0; i < buckets.length; i++) {
const right = parseFloat(buckets[i][2]);
const left = parseFloat(buckets[i][1]);
const prevRight = i > 0 ? parseFloat(buckets[i - 1][2]) : 0;

if (right >= 0) {
if (i === 0) {
if (left < 0) {
return left; // return the first negative bucket
}
return 0; // all buckets are positive
}
return prevRight; // return the last negative bucket
}
}
console.log('findmaxneg returning: ', buckets[buckets.length - 1][2]);
return parseFloat(buckets[buckets.length - 1][2]); // all buckets are negative
}

// Calculates the left position of the zero axis as a percentage string.
export function findZeroAxisLeft(
scale: ScaleType,
rangeMin: number,
rangeMax: number,
minPositive: number,
maxNegative: number,
zeroBucketIdx: number,
widthNegative: number,
widthTotal: number,
expBucketWidth: number
): string {
if (scale === 'linear') {
return ((0 - rangeMin) / (rangeMax - rangeMin)) * 100 + '%';
} else {
if (maxNegative === 0) {
return '0%';
}
if (minPositive === 0) {
return '100%';
}
if (zeroBucketIdx === -1) {
// if there is no zero bucket, we must place the zero axis between the buckets around zero
return (widthNegative / widthTotal) * 100 + '%';
}
if ((widthNegative + 0.5 * expBucketWidth) / widthTotal > 0) {
return ((widthNegative + 0.5 * expBucketWidth) / widthTotal) * 100 + '%';
} else {
return '0%';
}
}
}

// Determines if the zero axis should be shown such that the zero label does not overlap with the range labels.
// The zero axis is shown if it is between 5% and 95% of the graph.
export function showZeroAxis(zeroAxisLeft: string) {
const axisNumber = parseFloat(zeroAxisLeft.slice(0, -1));
if (5 < axisNumber && axisNumber < 95) {
return true;
}
return false;
}

// Finds the index of the bucket whose range includes zero
export function findZeroBucket(buckets: [number, string, string, string][]): number {
for (let i = 0; i < buckets.length; i++) {
const left = parseFloat(buckets[i][1]);
const right = parseFloat(buckets[i][2]);
if (left <= 0 && right >= 0) {
return i;
}
}
return -1;
}
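To make the helper semantics concrete, consider (purely for illustration) three buckets whose bounds are [-2, -1], [-1, 1] and [1, 2]: findZeroBucket returns 1 because the middle bucket straddles zero, findMinPositive returns 1 (the right bound of that straddling bucket), and findMaxNegative returns -1 (the right bound of the last purely negative bucket).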
@ -37,6 +37,7 @@ interface PanelState {
lastQueryParams: QueryParams | null;
loading: boolean;
warnings: string[] | null;
infos: string[] | null;
error: string | null;
stats: QueryStats | null;
exprInputValue: string;

@ -87,6 +88,7 @@ class Panel extends Component<PanelProps, PanelState> {
lastQueryParams: null,
loading: false,
warnings: null,
infos: null,
error: null,
stats: null,
exprInputValue: props.options.expr,

@ -204,6 +206,7 @@ class Panel extends Component<PanelProps, PanelState> {
data: query.data,
exemplars: exemplars?.data,
warnings: query.warnings,
infos: query.infos,
lastQueryParams: {
startTime,
endTime,

@ -307,6 +310,11 @@ class Panel extends Component<PanelProps, PanelState> {
<Col>{warning && <Alert color="warning">{warning}</Alert>}</Col>
</Row>
))}
{this.state.infos?.map((info, index) => (
<Row key={index}>
<Col>{info && <Alert color="info">{info}</Alert>}</Col>
</Row>
))}
<Row>
<Col>
<Nav tabs>
@ -265,6 +265,8 @@ type Options struct {
IsAgent bool
AppName string

AcceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg

Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
}

@ -353,6 +355,7 @@ func New(logger log.Logger, o *Options) *Handler {
o.Registerer,
nil,
o.EnableRemoteWriteReceiver,
o.AcceptRemoteWriteProtoMsgs,
o.EnableOTLPWriteReceiver,
)