Compare commits

...

19 commits

Author SHA1 Message Date
Daniel Kimsey 2c311ba85f
Merge aa3e58358b into e480cf21eb 2024-09-19 13:01:25 -04:00
Julius Volz e480cf21eb
Merge pull request #14931 from prometheus/nexucis/autocomplete-topl
UI/PromQL: autocomplete topk-like aggregation function parameters
2024-09-19 18:12:59 +02:00
Björn Rabenstein df9916ef66
Merge pull request #14677 from prometheus/beorn7/histogram
promql(native histograms): Introduce exponential interpolation
2024-09-19 18:08:59 +02:00
Björn Rabenstein c7fb6188b4
Merge pull request #14930 from jan--f/holt-winters-experimental
move holt_winters to the experimental functions and rename
2024-09-19 17:52:39 +02:00
Augustin Husson 6e899fbb16 fix autocompletion when using by/without
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
2024-09-19 16:35:14 +02:00
Jan Fajerski aa6dd70812 changelog: record holt_winters rename
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
2024-09-19 15:29:09 +02:00
Jan Fajerski 96e5a94d29 promql: rename holt_winters to double_exponential_smoothing
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
2024-09-19 15:29:01 +02:00
beorn7 6fcd225aee promql(native histograms): Introduce exponential interpolation
The linear interpolation (assuming that observations are uniformly
distributed within a bucket) is a solid and simple assumption in the
absence of any other information. However, the exponential bucketing
used by standard schemas of native histograms has been chosen to cover
the whole range of observations in a way that spreads bucket
populations reasonably over the buckets for typical distributions
encountered in real-world scenarios.

This is the origin of the idea implemented here: If we divide a given
bucket into two (or more) smaller exponential buckets, we "most
naturally" expect that the samples in the original bucket will split
among those smaller buckets in a more or less uniform fashion. With
this assumption, we end up with an "exponential interpolation", which
therefore appears to be a better match for histograms with exponential
bucketing.
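
To make the difference concrete, here is a minimal Go sketch (an
illustration of the idea above, not the actual Prometheus
implementation; the function names are hypothetical). Exponential
interpolation amounts to linear interpolation in log space:

```go
package main

import (
	"fmt"
	"math"
)

// linearInterpolate assumes observations are uniformly distributed
// within the bucket (as used for NHCB and for the zero bucket).
func linearInterpolate(lower, upper, fraction float64) float64 {
	return lower + (upper-lower)*fraction
}

// exponentialInterpolate assumes observations would uniformly populate
// hypothetical higher-resolution exponential sub-buckets, which is
// equivalent to interpolating linearly in log space.
func exponentialInterpolate(lower, upper, fraction float64) float64 {
	return math.Exp2(math.Log2(lower) + (math.Log2(upper)-math.Log2(lower))*fraction)
}

func main() {
	// A quantile falling halfway into the bucket (2, 4]:
	fmt.Println(linearInterpolate(2, 4, 0.5))      // 3
	fmt.Println(exponentialInterpolate(2, 4, 0.5)) // ≈2.83 (2·√2)
}
```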

This commit leaves the linear interpolation in place for NHCB, but
changes the interpolation for exponential native histograms to
exponential. This affects `histogram_quantile` and
`histogram_fraction` (because the latter is more or less the inverse
of the former).

The zero bucket has to be treated specially because the assumption
above would lead to an "interpolation to zero" (the bucket density
approaches infinity around zero, and with the postulated uniform usage
of buckets, we would end up with an estimate of zero for all quantiles
ending up in the zero bucket). We simply fall back to linear
interpolation within the zero bucket.

At the same time, this commit makes the call to stick with the
assumption that the zero bucket only contains positive observations
for native histograms without negative buckets (and vice versa). (This
is an assumption relevant for interpolation. It is a mostly academic
point, as the zero bucket is supposed to be very small anyway.
However, in cases where it _is_ relevantly broad, the assumption helps
a lot in practice.)

This commit also updates and completes the documentation to match both
details about interpolation.

As a more high level note: The approach here attempts to strike a
balance between a more simplistic approach without any assumption, and
a more involved approach with more sophisticated assumptions. I will
shortly describe both for reference:

The "zero assumption" approach would be to not interpolate at all, but
_always_ return the harmonic mean of the bucket boundaries of the
bucket the quantile ends up in. This has the advantage of minimizing
the maximum possible relative error of the quantile estimation.
(Depending on the exact definition of the relative error of an
estimation, there is also an argument to return the arithmetic mean of
the bucket boundaries.) While limiting the maximum possible relative
error is a good property, this approach would throw away the
information if a quantile is closer to the upper or lower end of the
population within a bucket. This can be valuable trending information
in a dashboard. With any kind of interpolation, the maximum possible
error of a quantile estimation increases to the full width of a bucket
(i.e. it more than doubles for the harmonic mean approach, and
precisely doubles for the arithmetic mean approach). However, in
return the _expectation value_ of the error decreases. The increase of
the theoretical maximum only has practical relevance for pathological
distributions. For example, if there are a thousand observations within
a bucket, they could _all_ be at the upper bound of the bucket. If the
quantile calculation picks the 1st observation in the bucket as the
relevant one, an interpolation will yield a value close to the lower
bucket boundary, while the true quantile value is close to the upper
boundary.

The "fancy interpolation" approach would be one that analyses the
_actual_ distribution of samples in the histogram. A lot of statistics
could be applied based on the information we have available in the
histogram. This would include the population of neighboring (or even
all) buckets in the histogram. In general, the resolution of a native
histogram should be quite high, and therefore, those "fancy"
approaches would increase the computational cost quite a bit with very
little practical benefits (i.e. just tiny corrections of the estimated
quantile value). The results are also much harder to reason with.

Signed-off-by: beorn7 <beorn@grafana.com>
2024-09-19 14:19:10 +02:00
Julius Volz c36589a6dd
Merge pull request #14940 from roidelapluie/subsecondzoom
UI: Disallow sub-second zoom as this causes inconsistencies in the X axis in uPlot
2024-09-19 12:20:37 +02:00
Julien 546f780006 UI: Disallow sub-second zoom as this causes inconsistencies in the X axis in uPlot
Fixes #9135

Signed-off-by: Julien <roidelapluie@o11y.eu>
2024-09-19 11:51:41 +02:00
Jan Fajerski 15cea39136 promql: put holt_winters behind experimental feature flag
Signed-off-by: Jan Fajerski <jfajersk@redhat.com>
2024-09-18 15:39:58 +02:00
Joshua Hesketh b6107cc888
Make rate possible non-counter annotation consistent (#14910)
* Make rate possible non-counter annotation consistent

Previously a PossibleNonCounterInfo annotation would be left in cases
where a range-vector selects 1 float data point, even if no more points
are selected in order to calculate a rate.

This change ensures an output float exists before emitting such an
annotation.

This fixes an inconsistency where a series with mixed data (i.e., a float
and a native histogram) would emit an annotation without any points.

For example,

```

load 1m
series{label="a"} 1 {{schema:1 sum:10 count:5 buckets:[1 2 3]}}

eval instant at 1m rate(series[1m1s])

```

Would have a PossibleNonCounterInfo annotation.

Whereas

```

load 1m
series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}}

eval instant at 1m rate(series[1m1s])

```

Would not. 

---------

Signed-off-by: Joshua Hesketh <josh@nitrotech.org>
2024-09-18 10:21:25 +00:00
Augustin Husson 69619990f8 UI/PromQL: autocomplete topk-like aggregation function parameters
Signed-off-by: Augustin Husson <husson.augustin@gmail.com>
2024-09-18 11:53:09 +02:00
Bryan Boreham bb47f78929
Merge pull request #14505 from marioferh/improve_performance_regex
[CHANGE] regexp . to match \n and optimize performance
2024-09-18 09:54:16 +01:00
Callum Styan c328d5fc88
fix rwv2 build write request benchmark, also change how the memory usage (#14925)
is reported for these benchmarks to more accurately reflect what's
actually allocated

Signed-off-by: Callum Styan <callumstyan@gmail.com>
2024-09-18 07:04:10 +01:00
Julius Volz b8d1336d42
Merge pull request #14912 from roidelapluie/notready
mantine UI: Distinguish between Not Ready and Stopping
2024-09-17 19:40:13 +02:00
Julien ac5377873f mantine UI: Distinguish between Not Ready and Stopping
Signed-off-by: Julien <roidelapluie@o11y.eu>
2024-09-17 16:02:16 +02:00
Mario Fernandez 5814920601
Fix: optimize .* regexp performance
Make the shortcut for `.*` match newlines as well.
Change the anchoring preamble to `^(?s:`.
Set the dotAll flag for all regexes.
Add and fix regex tests.
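
A hedged illustration of the effect (a standalone example, not code
from this change): with the `^(?s:` anchoring preamble, `.` matches
newlines in anchored matchers.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	pattern := ".*"
	// Old anchoring: `.` does not match newlines.
	before := regexp.MustCompile("^(?:" + pattern + ")$")
	// New anchoring with the dotAll flag: `.` matches newlines too.
	after := regexp.MustCompile("^(?s:" + pattern + ")$")
	fmt.Println(before.MatchString("line1\nline2")) // false
	fmt.Println(after.MatchString("line1\nline2"))  // true
}
```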

Signed-off-by: Mario Fernandez <mariofer@redhat.com>
2024-09-17 12:18:31 +02:00
Daniel Kimsey aa3e58358b consul: Add support for catalog list services filter
This adds support for Consul's Catalog [List Services][^1] API's `filter`
parameter, added in 1.14.x. This parameter grants the operator more
flexibility to do server-side filtering of the Catalog before
Prometheus subscribes for updates. Operators can use this both to
improve the performance of Prometheus's Consul SD and to reduce the
impact of enumerating large catalogs.

[^1]: https://developer.hashicorp.com/consul/api-docs/v1.14.x/catalog
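
A hedged sketch of programmatic use, based on the `SDConfig` fields
visible in the diff further down (the server address and the filter
expression are made-up examples):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/discovery/consul"
)

func main() {
	// Only services matching the server-side filter expression are
	// enumerated from the catalog and watched for updates.
	cfg := &consul.SDConfig{
		Server:   "localhost:8500",
		Services: []string{"web"},
		Filter:   `NodeMeta.rack_name == "2304"`,
	}
	fmt.Println(cfg.Filter)
}
```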

Signed-off-by: Daniel Kimsey <dekimsey@protonmail.com>
2024-03-17 20:32:54 -05:00
35 changed files with 909 additions and 340 deletions


@@ -1,5 +1,10 @@
# Changelog
## unreleased
* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
## 3.0.0-beta.0 / 2024-09-05
Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.


@@ -983,7 +983,7 @@ func main() {
},
func(err error) {
close(cancel)
webHandler.SetReady(false)
webHandler.SetReady(web.Stopping)
},
)
}
@@ -1162,7 +1162,7 @@ func main() {
reloadReady.Close()
webHandler.SetReady(true)
webHandler.SetReady(web.Ready)
level.Info(logger).Log("msg", "Server is ready to receive web requests.")
<-cancel
return nil


@@ -113,8 +113,11 @@ type SDConfig struct {
Services []string `yaml:"services,omitempty"`
// A list of tags used to filter instances inside a service. Services must contain all tags in the list.
ServiceTags []string `yaml:"tags,omitempty"`
// Desired node metadata.
// Desired node metadata. As of Consul 1.14, consider `filter` instead.
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
// Consul filter string
// See https://www.consul.io/api-docs/catalog#filtering-1, for syntax
Filter string `yaml:"filter,omitempty"`
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
}
@@ -174,6 +177,7 @@ type Discovery struct {
watchedServices []string // Set of services which will be discovered.
watchedTags []string // Tags used to filter instances of a service.
watchedNodeMeta map[string]string
watchedFilter string
allowStale bool
refreshInterval time.Duration
finalizer func()
@@ -218,6 +222,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
watchedServices: conf.Services,
watchedTags: conf.ServiceTags,
watchedNodeMeta: conf.NodeMeta,
watchedFilter: conf.Filter,
allowStale: conf.AllowStale,
refreshInterval: time.Duration(conf.RefreshInterval),
clientDatacenter: conf.Datacenter,
@@ -361,13 +366,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
// entire list of services.
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) {
catalog := d.client.Catalog()
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter)
opts := &consul.QueryOptions{
WaitIndex: *lastIndex,
WaitTime: watchTimeout,
AllowStale: d.allowStale,
NodeMeta: d.watchedNodeMeta,
Filter: d.watchedFilter,
}
t0 := time.Now()
srvs, meta, err := catalog.Services(opts.WithContext(ctx))


@@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
case "/v1/catalog/services?index=1&wait=120000ms":
time.Sleep(5 * time.Second)
response = ServicesTestAnswer
case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms":
response = ServicesTestAnswer
default:
t.Errorf("Unhandled consul call: %s", r.URL)
}
@@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) {
<-ch
}
// Watch the test service with a specific tag and node-meta via Filter parameter.
func TestFilterOption(t *testing.T) {
stub, config := newServer(t)
defer stub.Close()
config.Services = []string{"test"}
config.Filter = `NodeMeta.rack_name == "2304"`
config.Token = "fake-token"
d := newDiscovery(t, config)
ctx, cancel := context.WithCancel(context.Background())
ch := make(chan []*targetgroup.Group)
go func() {
d.Run(ctx, ch)
close(ch)
}()
checkOneTarget(t, <-ch)
cancel()
}
func TestGetDatacenterShouldReturnError(t *testing.T) {
for _, tc := range []struct {
handler func(http.ResponseWriter, *http.Request)


@@ -789,14 +789,17 @@ The following meta labels are available on targets during [relabeling](#relabel_
services:
[ - <string> ]
# See https://www.consul.io/api/catalog.html#list-nodes-for-service to know more
# about the possible filters that can be used.
# A Consul Filter expression used to filter the catalog results
# See https://www.consul.io/api-docs/catalog#list-services to know more
# about the filter expressions that can be used.
[ filter: <string> ]
# The `tags` and `node_meta` fields are deprecated in Consul in favor of `filter`.
# An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list.
tags:
[ - <string> ]
# Node metadata key/value pairs to filter nodes for a given service.
# Node metadata key/value pairs to filter nodes for a given service. As of Consul 1.14, consider `filter` instead.
[ node_meta:
[ <string>: <string> ... ] ]


@@ -326,45 +326,70 @@ With native histograms, aggregating everything works as usual without any `by` c
histogram_quantile(0.9, sum(rate(http_request_duration_seconds[10m])))
The `histogram_quantile()` function interpolates quantile values by
assuming a linear distribution within a bucket.
In the (common) case that a quantile value does not coincide with a bucket
boundary, the `histogram_quantile()` function interpolates the quantile value
within the bucket the quantile value falls into. For classic histograms, for
native histograms with custom bucket boundaries, and for the zero bucket of
other native histograms, it assumes a uniform distribution of observations
within the bucket (also called _linear interpolation_). For the
non-zero-buckets of native histograms with a standard exponential bucketing
schema, the interpolation is done under the assumption that the samples within
the bucket are distributed in a way that they would uniformly populate the
buckets in a hypothetical histogram with higher resolution. (This is also
called _exponential interpolation_.)
If `b` has 0 observations, `NaN` is returned. For φ < 0, `-Inf` is
returned. For φ > 1, `+Inf` is returned. For φ = `NaN`, `NaN` is returned.
The following is only relevant for classic histograms: If `b` contains
fewer than two buckets, `NaN` is returned. The highest bucket must have an
upper bound of `+Inf`. (Otherwise, `NaN` is returned.) If a quantile is located
in the highest bucket, the upper bound of the second highest bucket is
returned. A lower limit of the lowest bucket is assumed to be 0 if the upper
bound of that bucket is greater than
0. In that case, the usual linear interpolation is applied within that
bucket. Otherwise, the upper bound of the lowest bucket is returned for
quantiles located in the lowest bucket.
Special cases for classic histograms:
You can use `histogram_quantile(0, v instant-vector)` to get the estimated minimum value stored in
a histogram.
* If `b` contains fewer than two buckets, `NaN` is returned.
* The highest bucket must have an upper bound of `+Inf`. (Otherwise, `NaN` is
returned.)
* If a quantile is located in the highest bucket, the upper bound of the second
highest bucket is returned.
* The lower limit of the lowest bucket is assumed to be 0 if the upper bound of
that bucket is greater than 0. In that case, the usual linear interpolation
is applied within that bucket. Otherwise, the upper bound of the lowest
bucket is returned for quantiles located in the lowest bucket.
You can use `histogram_quantile(1, v instant-vector)` to get the estimated maximum value stored in
a histogram.
Special cases for native histograms (relevant for the exact interpolation
happening within the zero bucket):
Buckets of classic histograms are cumulative. Therefore, the following should always be the case:
* A zero bucket with finite width is assumed to contain no negative
observations if the histogram has observations in positive buckets, but none
in negative buckets.
* A zero bucket with finite width is assumed to contain no positive
observations if the histogram has observations in negative buckets, but none
in positive buckets.
* The counts in the buckets are monotonically increasing (strictly non-decreasing).
* A lack of observations between the upper limits of two consecutive buckets results in equal counts
in those two buckets.
You can use `histogram_quantile(0, v instant-vector)` to get the estimated
minimum value stored in a histogram.
However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets
with `sum(rate(...))`) or invalid data might violate these assumptions. In that case,
`histogram_quantile` would be unable to return meaningful results. To mitigate the issue,
`histogram_quantile` assumes that tiny relative differences between consecutive buckets are happening
because of floating point precision errors and ignores them. (The threshold to ignore a difference
between two buckets is a trillionth (1e-12) of the sum of both buckets.) Furthermore, if there are
non-monotonic bucket counts even after this adjustment, they are increased to the value of the
previous buckets to enforce monotonicity. The latter is evidence for an actual issue with the input
data and is therefore flagged with an informational annotation reading `input to histogram_quantile
needed to be fixed for monotonicity`. If you encounter this annotation, you should find and remove
the source of the invalid data.
You can use `histogram_quantile(1, v instant-vector)` to get the estimated
maximum value stored in a histogram.
Buckets of classic histograms are cumulative. Therefore, the following should
always be the case:
* The counts in the buckets are monotonically increasing (strictly
non-decreasing).
* A lack of observations between the upper limits of two consecutive buckets
results in equal counts in those two buckets.
However, floating point precision issues (e.g. small discrepancies introduced
by computing of buckets with `sum(rate(...))`) or invalid data might violate
these assumptions. In that case, `histogram_quantile` would be unable to return
meaningful results. To mitigate the issue, `histogram_quantile` assumes that
tiny relative differences between consecutive buckets are happening because of
floating point precision errors and ignores them. (The threshold to ignore a
difference between two buckets is a trillionth (1e-12) of the sum of both
buckets.) Furthermore, if there are non-monotonic bucket counts even after this
adjustment, they are increased to the value of the previous buckets to enforce
monotonicity. The latter is evidence for an actual issue with the input data
and is therefore flagged with an informational annotation reading `input to
histogram_quantile needed to be fixed for monotonicity`. If you encounter this
annotation, you should find and remove the source of the invalid data.
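
As an illustration of the mitigation just described, a hedged Go
sketch (not the actual Prometheus code; `fixMonotonicity` is a
hypothetical helper name) of the fix-up applied to cumulative bucket
counts:

```go
package main

import "fmt"

// fixMonotonicity clamps non-monotonic cumulative bucket counts.
// Decreases within a trillionth (1e-12) of the sum of the two counts
// are treated as floating point noise; larger decreases are clamped
// too, but reported so an informational annotation can be emitted.
func fixMonotonicity(counts []float64) (needsAnnotation bool) {
	for i := 1; i < len(counts); i++ {
		if counts[i] >= counts[i-1] {
			continue
		}
		if counts[i-1]-counts[i] > (counts[i-1]+counts[i])*1e-12 {
			needsAnnotation = true // genuinely invalid input data
		}
		counts[i] = counts[i-1]
	}
	return needsAnnotation
}

func main() {
	counts := []float64{1, 3, 2.9999999999999996, 2, 5}
	fmt.Println(fixMonotonicity(counts), counts) // true [1 3 3 3 5]
}
```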
## `histogram_stddev()` and `histogram_stdvar()`
@@ -380,15 +405,22 @@ do not show up in the returned vector.
Similarly, `histogram_stdvar(v instant-vector)` returns the estimated standard
variance of observations in a native histogram.
## `holt_winters()`
## `double_exponential_smoothing()`
`holt_winters(v range-vector, sf scalar, tf scalar)` produces a smoothed value
**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.**
`double_exponential_smoothing(v range-vector, sf scalar, tf scalar)` produces a smoothed value
for time series based on the range in `v`. The lower the smoothing factor `sf`,
the more importance is given to old data. The higher the trend factor `tf`, the
more trends in the data are considered. Both `sf` and `tf` must be between 0 and
1.
For additional details, refer to [NIST Engineering Statistics Handbook](https://www.itl.nist.gov/div898/handbook/pmc/section4/pmc433.htm).
In Prometheus V2 this function was called `holt_winters`. This caused confusion
since the Holt-Winters method usually refers to triple exponential smoothing.
Double exponential smoothing as implemented here is also referred to as "Holt
Linear".
`holt_winters` should only be used with gauges.
`double_exponential_smoothing` should only be used with gauges.
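
For reference, a minimal Go sketch of double exponential smoothing as
described (following the Wikipedia formulation referenced above; the
function name and initialization are illustrative, not the exact
Prometheus implementation):

```go
package main

import (
	"fmt"
	"math"
)

// doubleExponentialSmoothing returns the smoothed value for the last
// sample. sf is the smoothing factor and tf the trend factor; both
// must be between 0 and 1.
func doubleExponentialSmoothing(samples []float64, sf, tf float64) float64 {
	if len(samples) < 2 {
		return math.NaN()
	}
	s := samples[0]              // initial level estimate
	b := samples[1] - samples[0] // initial trend estimate
	for _, x := range samples[1:] {
		sPrev := s
		s = sf*x + (1-sf)*(sPrev+b) // update the level
		b = tf*(s-sPrev) + (1-tf)*b // update the trend
	}
	return s
}

func main() {
	fmt.Println(doubleExponentialSmoothing([]float64{1, 2, 3, 4, 5}, 0.5, 0.5))
}
```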
## `hour()`


@@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
// available, even if the string matcher is faster.
m.matchString = m.stringMatcher.Matches
} else {
parsed, err := syntax.Parse(v, syntax.Perl)
parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL)
if err != nil {
return nil, err
}
// Simplify the syntax tree to run faster.
parsed = parsed.Simplify()
m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$")
m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$")
if err != nil {
return nil, err
}


@@ -121,7 +121,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) {
t.Parallel()
m, err := NewFastRegexMatcher(r)
require.NoError(t, err)
re := regexp.MustCompile("^(?:" + r + ")$")
re := regexp.MustCompile("^(?s:" + r + ")$")
require.Equal(t, re.MatchString(v), m.MatchString(v))
})
}
@@ -167,7 +167,7 @@ func TestOptimizeConcatRegex(t *testing.T) {
}
for _, c := range cases {
parsed, err := syntax.Parse(c.regex, syntax.Perl)
parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
prefix, suffix, contains := optimizeConcatRegex(parsed)
@@ -248,7 +248,7 @@ func TestFindSetMatches(t *testing.T) {
c := c
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matches, actualCaseSensitive := findSetMatches(parsed)
require.Equal(t, c.expMatches, matches)
@@ -348,15 +348,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
pattern string
exp StringMatcher
}{
{".*", anyStringWithoutNewlineMatcher{}},
{".*?", anyStringWithoutNewlineMatcher{}},
{".*", trueMatcher{}},
{".*?", trueMatcher{}},
{"(?s:.*)", trueMatcher{}},
{"(.*)", anyStringWithoutNewlineMatcher{}},
{"^.*$", anyStringWithoutNewlineMatcher{}},
{".+", &anyNonEmptyStringMatcher{matchNL: false}},
{"(.*)", trueMatcher{}},
{"^.*$", trueMatcher{}},
{".+", &anyNonEmptyStringMatcher{matchNL: true}},
{"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}},
{"^.+$", &anyNonEmptyStringMatcher{matchNL: false}},
{"(.+)", &anyNonEmptyStringMatcher{matchNL: false}},
{"^.+$", &anyNonEmptyStringMatcher{matchNL: true}},
{"(.+)", &anyNonEmptyStringMatcher{matchNL: true}},
{"", emptyStringMatcher{}},
{"^$", emptyStringMatcher{}},
{"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}},
@@ -366,23 +366,23 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})},
{"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})},
{"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})},
{".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: anyStringWithoutNewlineMatcher{}}},
{"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}},
{"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}},
{"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"10\\.0\\.(1|2)\\.+", nil},
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}},
{"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}},
{"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})},
{"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})},
{"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: nil}},
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}},
{"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}},
{"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})},
{"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})},
{"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}},
// we don't support case insensitive matching for contains.
// This is because there's no strings.IndexOfFold function.
// We can revisit later if this is really popular by using strings.ToUpper.
@@ -393,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{".*foo.*bar.*", nil},
{`\d*`, nil},
{".", nil},
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}},
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}},
// This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat.
// It would make the code too complex to handle it.
{"(.+)/(foo.*|bar$)", nil},
// Case sensitive alternate with same literal prefix and .* suffix.
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}},
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}},
// Case insensitive alternate with same literal prefix and .* suffix.
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}},
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}},
// Concatenated variable length selectors are not supported.
{"foo.*.*", nil},
{"foo.+.+", nil},
@@ -410,15 +410,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{"aaa.?.?", nil},
{"aaa.?.*", nil},
// Regexps with ".?".
{"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}},
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}},
{"f.?o", nil},
} {
c := c
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matches := stringMatcherFromRegexp(parsed)
require.Equal(t, c.exp, matches)
@@ -437,16 +437,16 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
{
pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)",
expectedLiteralPrefixMatchers: 3,
expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "xyz-016a-ixb-dp\n"},
expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"},
},
// Case insensitive.
{
pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)",
expectedLiteralPrefixMatchers: 3,
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX"},
expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp", "xyz-016a-ixb-dp\n"},
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"},
expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"},
},
// Nested literal prefixes, case sensitive.
@@ -474,13 +474,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
},
} {
t.Run(c.pattern, func(t *testing.T) {
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matcher := stringMatcherFromRegexp(parsed)
require.NotNil(t, matcher)
re := regexp.MustCompile("^" + c.pattern + "$")
re := regexp.MustCompile("^(?s:" + c.pattern + ")$")
// Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher.
numPrefixMatchers := 0
@@ -523,16 +523,16 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) {
{
pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)",
expectedLiteralSuffixMatchers: 2,
expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"},
expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"},
},
// Case insensitive.
{
pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)",
expectedLiteralSuffixMatchers: 2,
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op"},
expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"},
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"},
expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"},
},
// Nested literal suffixes, case sensitive.
@@ -552,13 +552,13 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) {
},
} {
t.Run(c.pattern, func(t *testing.T) {
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matcher := stringMatcherFromRegexp(parsed)
require.NotNil(t, matcher)
re := regexp.MustCompile("^" + c.pattern + "$")
re := regexp.MustCompile("^(?s:" + c.pattern + ")$")
// Pre-condition check: ensure it contains literalSuffixStringMatcher.
numSuffixMatchers := 0
@@ -598,26 +598,26 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) {
{
pattern: "test.?",
expectedZeroOrOneMatchers: 1,
expectedMatches: []string{"test", "test!"},
expectedNotMatches: []string{"test\n", "tes", "test!!"},
expectedMatches: []string{"test\n", "test", "test!"},
expectedNotMatches: []string{"tes", "test!!"},
},
{
pattern: ".?test",
expectedZeroOrOneMatchers: 1,
expectedMatches: []string{"test", "!test"},
expectedNotMatches: []string{"\ntest", "tes", "test!"},
expectedMatches: []string{"\ntest", "test", "!test"},
expectedNotMatches: []string{"tes", "test!"},
},
{
pattern: "(aaa.?|bbb.?)",
expectedZeroOrOneMatchers: 2,
expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX"},
expectedNotMatches: []string{"aa", "aaaXX", "aaa\n", "bb", "bbbXX", "bbb\n"},
expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"},
expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"},
},
{
pattern: ".*aaa.?",
expectedZeroOrOneMatchers: 1,
expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX"},
expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX", "XXXaaa\n"},
expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"},
expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"},
},
// Match newline.
@@ -632,18 +632,18 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) {
{
pattern: "(aaa.?|((?s).?bbb.+))",
expectedZeroOrOneMatchers: 2,
expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX"},
expectedNotMatches: []string{"aa", "aaa\n", "Xbbb", "\nbbb"},
expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"},
expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"},
},
} {
t.Run(c.pattern, func(t *testing.T) {
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matcher := stringMatcherFromRegexp(parsed)
require.NotNil(t, matcher)
re := regexp.MustCompile("^" + c.pattern + "$")
re := regexp.MustCompile("^(?s:" + c.pattern + ")$")
// Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher.
numZeroOrOneMatchers := 0
@@ -1112,7 +1112,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) {
}
b.Logf("regexp: %s", re)
parsed, err := syntax.Parse(re, syntax.Perl)
parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL)
require.NoError(b, err)
unoptimized := stringMatcherFromRegexpInternal(parsed)


@@ -171,7 +171,7 @@ type Regexp struct {
// NewRegexp creates a new anchored Regexp and returns an error if the
// passed-in regular expression does not compile.
func NewRegexp(s string) (Regexp, error) {
regex, err := regexp.Compile("^(?:" + s + ")$")
regex, err := regexp.Compile("^(?s:" + s + ")$")
return Regexp{Regexp: regex}, err
}
@@ -218,8 +218,8 @@ func (re Regexp) String() string {
}
str := re.Regexp.String()
// Trim the anchor `^(?:` prefix and `)$` suffix.
return str[4 : len(str)-2]
// Trim the anchor `^(?s:` prefix and `)$` suffix.
return str[5 : len(str)-2]
}
// Process returns a relabeled version of the given label set. The relabel configurations


@@ -569,6 +569,29 @@ func TestRelabel(t *testing.T) {
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("line1.*line2"),
TargetLabel: "d",
Separator: ";",
Replacement: "match${1}",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
"d": "match",
}),
},
}
for _, test := range tests {


@@ -117,7 +117,7 @@ func rangeQueryCases() []benchCase {
},
// Holt-Winters and long ranges.
{
expr: "holt_winters(a_X[1d], 0.3, 0.3)",
expr: "double_exponential_smoothing(a_X[1d], 0.3, 0.3)",
},
{
expr: "changes(a_X[1d])",


@@ -1742,9 +1742,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
ev.samplesStats.UpdatePeak(ev.currentSamples)
if e.Func.Name == "rate" || e.Func.Name == "increase" {
samples := inMatrix[0]
metricName := samples.Metric.Get(labels.MetricName)
if metricName != "" && len(samples.Floats) > 0 &&
metricName := inMatrix[0].Metric.Get(labels.MetricName)
if metricName != "" && len(ss.Floats) > 0 &&
!strings.HasSuffix(metricName, "_total") &&
!strings.HasSuffix(metricName, "_sum") &&
!strings.HasSuffix(metricName, "_count") &&


@@ -19,6 +19,7 @@ import (
"fmt"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
@ -3708,3 +3709,75 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:
},
})
}
func TestRateAnnotations(t *testing.T) {
testCases := map[string]struct {
data string
expr string
expectedWarningAnnotations []string
expectedInfoAnnotations []string
}{
"info annotation when two samples are selected": {
data: `
series 1 2
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{
`PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`,
},
},
"no info annotations when no samples": {
data: `
series
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{},
},
"no info annotations when selecting one sample": {
data: `
series 1 2
`,
expr: "rate(series[10s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{},
},
"no info annotations when no samples due to mixed data types": {
data: `
series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}}
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{
`PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`,
},
expectedInfoAnnotations: []string{},
},
"no info annotations when selecting two native histograms": {
data: `
series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}}
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{},
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data))
t.Cleanup(func() { _ = store.Close() })
engine := newTestEngine(t)
query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute))
require.NoError(t, err)
t.Cleanup(query.Close)
res := query.Exec(context.Background())
require.NoError(t, res.Err)
warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0)
testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings)
testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos)
})
}
}


@@ -350,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects
// how trends in historical data will affect the current data. A higher trend factor increases the influence
// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing".
func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := vals[0].(Matrix)[0]
// The smoothing factor argument.
@@ -1480,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
regexStr = stringFromArg(args[4])
)
regex, err := regexp.Compile("^(?:" + regexStr + ")$")
regex, err := regexp.Compile("^(?s:" + regexStr + ")$")
if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
}
@@ -1657,82 +1657,82 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper)
// FunctionCalls is a list of all functions supported by PromQL, including their types.
var FunctionCalls = map[string]FunctionCall{
"abs": funcAbs,
"absent": funcAbsent,
"absent_over_time": funcAbsentOverTime,
"acos": funcAcos,
"acosh": funcAcosh,
"asin": funcAsin,
"asinh": funcAsinh,
"atan": funcAtan,
"atanh": funcAtanh,
"avg_over_time": funcAvgOverTime,
"ceil": funcCeil,
"changes": funcChanges,
"clamp": funcClamp,
"clamp_max": funcClampMax,
"clamp_min": funcClampMin,
"cos": funcCos,
"cosh": funcCosh,
"count_over_time": funcCountOverTime,
"days_in_month": funcDaysInMonth,
"day_of_month": funcDayOfMonth,
"day_of_week": funcDayOfWeek,
"day_of_year": funcDayOfYear,
"deg": funcDeg,
"delta": funcDelta,
"deriv": funcDeriv,
"exp": funcExp,
"floor": funcFloor,
"histogram_avg": funcHistogramAvg,
"histogram_count": funcHistogramCount,
"histogram_fraction": funcHistogramFraction,
"histogram_quantile": funcHistogramQuantile,
"histogram_sum": funcHistogramSum,
"histogram_stddev": funcHistogramStdDev,
"histogram_stdvar": funcHistogramStdVar,
"holt_winters": funcHoltWinters,
"hour": funcHour,
"idelta": funcIdelta,
"increase": funcIncrease,
"irate": funcIrate,
"label_replace": funcLabelReplace,
"label_join": funcLabelJoin,
"ln": funcLn,
"log10": funcLog10,
"log2": funcLog2,
"last_over_time": funcLastOverTime,
"mad_over_time": funcMadOverTime,
"max_over_time": funcMaxOverTime,
"min_over_time": funcMinOverTime,
"minute": funcMinute,
"month": funcMonth,
"pi": funcPi,
"predict_linear": funcPredictLinear,
"present_over_time": funcPresentOverTime,
"quantile_over_time": funcQuantileOverTime,
"rad": funcRad,
"rate": funcRate,
"resets": funcResets,
"round": funcRound,
"scalar": funcScalar,
"sgn": funcSgn,
"sin": funcSin,
"sinh": funcSinh,
"sort": funcSort,
"sort_desc": funcSortDesc,
"sort_by_label": funcSortByLabel,
"sort_by_label_desc": funcSortByLabelDesc,
"sqrt": funcSqrt,
"stddev_over_time": funcStddevOverTime,
"stdvar_over_time": funcStdvarOverTime,
"sum_over_time": funcSumOverTime,
"tan": funcTan,
"tanh": funcTanh,
"time": funcTime,
"timestamp": funcTimestamp,
"vector": funcVector,
"year": funcYear,
"abs": funcAbs,
"absent": funcAbsent,
"absent_over_time": funcAbsentOverTime,
"acos": funcAcos,
"acosh": funcAcosh,
"asin": funcAsin,
"asinh": funcAsinh,
"atan": funcAtan,
"atanh": funcAtanh,
"avg_over_time": funcAvgOverTime,
"ceil": funcCeil,
"changes": funcChanges,
"clamp": funcClamp,
"clamp_max": funcClampMax,
"clamp_min": funcClampMin,
"cos": funcCos,
"cosh": funcCosh,
"count_over_time": funcCountOverTime,
"days_in_month": funcDaysInMonth,
"day_of_month": funcDayOfMonth,
"day_of_week": funcDayOfWeek,
"day_of_year": funcDayOfYear,
"deg": funcDeg,
"delta": funcDelta,
"deriv": funcDeriv,
"exp": funcExp,
"floor": funcFloor,
"histogram_avg": funcHistogramAvg,
"histogram_count": funcHistogramCount,
"histogram_fraction": funcHistogramFraction,
"histogram_quantile": funcHistogramQuantile,
"histogram_sum": funcHistogramSum,
"histogram_stddev": funcHistogramStdDev,
"histogram_stdvar": funcHistogramStdVar,
"double_exponential_smoothing": funcDoubleExponentialSmoothing,
"hour": funcHour,
"idelta": funcIdelta,
"increase": funcIncrease,
"irate": funcIrate,
"label_replace": funcLabelReplace,
"label_join": funcLabelJoin,
"ln": funcLn,
"log10": funcLog10,
"log2": funcLog2,
"last_over_time": funcLastOverTime,
"mad_over_time": funcMadOverTime,
"max_over_time": funcMaxOverTime,
"min_over_time": funcMinOverTime,
"minute": funcMinute,
"month": funcMonth,
"pi": funcPi,
"predict_linear": funcPredictLinear,
"present_over_time": funcPresentOverTime,
"quantile_over_time": funcQuantileOverTime,
"rad": funcRad,
"rate": funcRate,
"resets": funcResets,
"round": funcRound,
"scalar": funcScalar,
"sgn": funcSgn,
"sin": funcSin,
"sinh": funcSinh,
"sort": funcSort,
"sort_desc": funcSortDesc,
"sort_by_label": funcSortByLabel,
"sort_by_label_desc": funcSortByLabelDesc,
"sqrt": funcSqrt,
"stddev_over_time": funcStddevOverTime,
"stdvar_over_time": funcStdvarOverTime,
"sum_over_time": funcSumOverTime,
"tan": funcTan,
"tanh": funcTanh,
"time": funcTime,
"timestamp": funcTimestamp,
"vector": funcVector,
"year": funcYear,
}
// AtModifierUnsafeFunctions are the functions whose result


@ -202,10 +202,11 @@ var Functions = map[string]*Function{
ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector},
ReturnType: ValueTypeVector,
},
"holt_winters": {
Name: "holt_winters",
ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
ReturnType: ValueTypeVector,
"double_exponential_smoothing": {
Name: "double_exponential_smoothing",
ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
ReturnType: ValueTypeVector,
Experimental: true,
},
"hour": {
Name: "hour",


@ -651,7 +651,7 @@ eval_ordered instant at 50m sort_by_label(node_uname_info, "release")
node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100
node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100
# Tests for holt_winters
# Tests for double_exponential_smoothing
clear
# positive trends
@ -661,7 +661,7 @@ load 10s
http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
http_requests{job="api-server", instance="1", group="canary"} 0+40x2000
eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1)
eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
{job="api-server", instance="0", group="production"} 8000
{job="api-server", instance="1", group="production"} 16000
{job="api-server", instance="0", group="canary"} 24000
@ -675,7 +675,7 @@ load 10s
http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000
http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000
eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1)
eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
{job="api-server", instance="0", group="production"} 0
{job="api-server", instance="1", group="production"} -16000
{job="api-server", instance="0", group="canary"} 24000


@ -46,9 +46,12 @@ eval instant at 1m histogram_fraction(1, 2, single_histogram)
eval instant at 1m histogram_fraction(0, 8, single_histogram)
{} 1
# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2.
# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to
# exponential interpolation, i.e. the "midpoint" within range 1 < x <=
# 2 is assumed where the bucket boundary would be if we increased the
# resolution of the histogram by one step.
eval instant at 1m histogram_quantile(0.5, single_histogram)
{} 1.5
{} 1.414213562373095
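The expected median can be reproduced by hand: exponential interpolation maps the bucket bounds onto a log scale, interpolates linearly there, and maps back. A minimal sketch of that arithmetic for the (1, 2] bucket:

package main

import (
	"fmt"
	"math"
)

func main() {
	lower, upper := 1.0, 2.0 // the middle bucket, 1 < x <= 2
	fraction := 0.5          // the rank puts the median halfway into it
	logLower, logUpper := math.Log2(lower), math.Log2(upper)
	// Interpolate on the log scale, then return to the linear scale.
	fmt.Println(math.Exp2(logLower + (logUpper-logLower)*fraction)) // 1.4142135623730951
}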
clear
@ -68,8 +71,9 @@ eval instant at 5m histogram_avg(multi_histogram)
eval instant at 5m histogram_fraction(1, 2, multi_histogram)
{} 0.5
# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, multi_histogram)
{} 1.5
{} 1.414213562373095
# Each entry should look the same as the first.
@ -85,8 +89,9 @@ eval instant at 50m histogram_avg(multi_histogram)
eval instant at 50m histogram_fraction(1, 2, multi_histogram)
{} 0.5
# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, multi_histogram)
{} 1.5
{} 1.414213562373095
clear
@ -109,8 +114,9 @@ eval instant at 5m histogram_avg(incr_histogram)
eval instant at 5m histogram_fraction(1, 2, incr_histogram)
{} 0.6
# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, incr_histogram)
{} 1.5
{} 1.414213562373095
eval instant at 50m incr_histogram
@ -129,16 +135,18 @@ eval instant at 50m histogram_avg(incr_histogram)
eval instant at 50m histogram_fraction(1, 2, incr_histogram)
{} 0.8571428571428571
# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5
{} 1.414213562373095
# Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[10m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
# Calculate the 50th percentile of observations over the last 10m.
# See explanation for exponential interpolation above.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
{} 1.5
{} 1.414213562373095
clear
@ -211,8 +219,9 @@ eval instant at 1m histogram_avg(negative_histogram)
eval instant at 1m histogram_fraction(-2, -1, negative_histogram)
{} 0.5
# Exponential interpolation works the same as for positive buckets, just mirrored.
eval instant at 1m histogram_quantile(0.5, negative_histogram)
{} -1.5
{} -1.414213562373095
clear
@ -233,8 +242,9 @@ eval instant at 5m histogram_avg(two_samples_histogram)
eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram)
{} 0.5
# See explanation for exponential interpolation above.
eval instant at 5m histogram_quantile(0.5, two_samples_histogram)
{} -1.5
{} -1.414213562373095
clear
@ -392,20 +402,24 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1)
eval instant at 10m histogram_quantile(1, histogram_quantile_1)
{} 16
# The following quantiles are within a bucket. Exponential
# interpolation is applied (rather than linear, as it is done for
# classic histograms), leading to slightly different quantile values.
eval instant at 10m histogram_quantile(0.99, histogram_quantile_1)
{} 15.759999999999998
{} 15.67072476139083
eval instant at 10m histogram_quantile(0.9, histogram_quantile_1)
{} 13.600000000000001
{} 12.99603834169977
eval instant at 10m histogram_quantile(0.6, histogram_quantile_1)
{} 4.799999999999997
{} 4.594793419988138
eval instant at 10m histogram_quantile(0.5, histogram_quantile_1)
{} 1.6666666666666665
{} 1.5874010519681994
# Linear interpolation within the zero bucket after all.
eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
{} 0.0006000000000000001
{} 0.0006
eval instant at 10m histogram_quantile(0, histogram_quantile_1)
{} 0
@ -425,17 +439,20 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2)
eval instant at 10m histogram_quantile(1, histogram_quantile_2)
{} 0
# Again, the quantile values here are slightly different from what
# they would be with linear interpolation. Note that quantiles
# ending up in the zero bucket are linearly interpolated after all.
eval instant at 10m histogram_quantile(0.99, histogram_quantile_2)
{} -6.000000000000048e-05
{} -0.00006
eval instant at 10m histogram_quantile(0.9, histogram_quantile_2)
{} -0.0005999999999999996
{} -0.0006
eval instant at 10m histogram_quantile(0.5, histogram_quantile_2)
{} -1.6666666666666667
{} -1.5874010519681996
eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
{} -13.6
{} -12.996038341699768
eval instant at 10m histogram_quantile(0, histogram_quantile_2)
{} -16
@ -445,7 +462,9 @@ eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2)
clear
# Apply quantile function to histogram with both positive and negative buckets with zero bucket.
# Apply quantile function to histogram with both positive and negative
# buckets with zero bucket.
# First positive buckets with exponential interpolation.
load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
@ -456,31 +475,34 @@ eval instant at 10m histogram_quantile(1, histogram_quantile_3)
{} 16
eval instant at 10m histogram_quantile(0.99, histogram_quantile_3)
{} 15.519999999999996
{} 15.34822590920423
eval instant at 10m histogram_quantile(0.9, histogram_quantile_3)
{} 11.200000000000003
{} 10.556063286183155
eval instant at 10m histogram_quantile(0.7, histogram_quantile_3)
{} 1.2666666666666657
{} 1.2030250360821164
# Linear interpolation in the zero bucket, symmetrically centered around
# the zero point.
eval instant at 10m histogram_quantile(0.55, histogram_quantile_3)
{} 0.0006000000000000005
{} 0.0006
eval instant at 10m histogram_quantile(0.5, histogram_quantile_3)
{} 0
eval instant at 10m histogram_quantile(0.45, histogram_quantile_3)
{} -0.0005999999999999996
{} -0.0006
# Finally negative buckets with mirrored exponential interpolation.
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3)
{} -1.266666666666667
{} -1.2030250360821169
eval instant at 10m histogram_quantile(0.1, histogram_quantile_3)
{} -11.2
{} -10.556063286183155
eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
{} -15.52
{} -15.34822590920423
eval instant at 10m histogram_quantile(0, histogram_quantile_3)
{} -16
@ -490,6 +512,90 @@ eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3)
clear
# Try different schemas. (The interpolation logic must not depend on the schema.)
clear
load 1m
var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}}
var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}}
var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}}
eval instant at 1m histogram_quantile(0.5, var_res_histogram)
{schema="-1"} 2.0
{schema="0"} 1.4142135623730951
{schema="+1"} 1.189207
eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"})
{schema="-1"} 0.5
eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"})
{schema="0"} 0.5
eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"})
{schema="+1"} 0.5
# The same as above, but one bucket "further to the right".
clear
load 1m
var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}}
var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}}
var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}}
eval instant at 1m histogram_quantile(0.5, var_res_histogram)
{schema="-1"} 8.0
{schema="0"} 2.82842712474619
{schema="+1"} 1.6817928305074292
eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"})
{schema="-1"} 0.5
eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"})
{schema="0"} 0.5
eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"})
{schema="+1"} 0.5
# And everything again but for negative buckets.
clear
load 1m
var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}}
var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}}
var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}}
eval instant at 1m histogram_quantile(0.5, var_res_histogram)
{schema="-1"} -2.0
{schema="0"} -1.4142135623730951
{schema="+1"} -1.189207
eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"})
{schema="-1"} 0.5
eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"})
{schema="0"} 0.5
eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"})
{schema="+1"} 0.5
clear
load 1m
var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}}
var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}}
var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}}
eval instant at 1m histogram_quantile(0.5, var_res_histogram)
{schema="-1"} -8.0
{schema="0"} -2.82842712474619
{schema="+1"} -1.6817928305074292
eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"})
{schema="-1"} 0.5
eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"})
{schema="0"} 0.5
eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"})
{schema="+1"} 0.5
# Apply fraction function to empty histogram.
load 10m
histogram_fraction_1 {{}}x1
@ -515,11 +621,18 @@ eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2)
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2)
{} 0.16666666666666666
# Note that this result and the one above add up to 1.
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2)
{} 0.8333333333333334
# We are in the zero bucket, resulting in linear interpolation.
eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2)
{} 0.8333333333333334
# Demonstrate that the inverse operation with histogram_quantile yields
# the original value with the non-trivial result above.
eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2)
{} 0.0005
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2)
{} 0
@ -527,17 +640,30 @@ eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2)
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2)
{} 0.25
# More non-trivial results with interpolation involved below, including
# some round-trips via histogram_quantile to prove that the inverse
# operation leads to the same results.
eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2)
{} 0.4795739585136224
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2)
{} 0.125
{} 0.10375937481971091
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2)
{} 0.3333333333333333
eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2)
{} 0.6320802083934297
eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2)
{} 6
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2)
{} 0.2916666666666667
{} 0.29874687506009634
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2)
{} 0.16666666666666666
{} 0.15250624987980724
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2)
{} 0
@ -600,6 +726,12 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3)
eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3)
{} 0.9166666666666666
eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3)
{} -0.0005
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3)
{} 0
@ -625,16 +757,22 @@ eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3)
{} 0.25
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3)
{} 0.125
{} 0.10375937481971091
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3)
{} 0.3333333333333333
eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3)
{} 0.36791979160657035
eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3)
{} -6
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3)
{} 0.2916666666666667
{} 0.29874687506009634
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3)
{} 0.16666666666666666
{} 0.15250624987980724
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3)
{} 0
@ -684,6 +822,18 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4)
eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4)
{} 0.5416666666666666
eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4)
{} 0.0005
eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4)
{} 0.4583333333333333
eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4)
{} -0.0005
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4)
{} 0.4166666666666667
@ -694,31 +844,31 @@ eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4)
{} 0.125
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4)
{} 0.0625
{} 0.051879687409855414
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4)
{} 0.14583333333333334
{} 0.14937343753004825
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4)
{} 0.08333333333333333
{} 0.07625312493990366
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4)
{} 0.125
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4)
{} 0.0625
{} 0.051879687409855456
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4)
{} 0.14583333333333334
{} 0.14937343753004817
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4)
{} 0.08333333333333333
{} 0.07625312493990362
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4)
{} 0


@ -153,19 +153,31 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) {
// histogramQuantile calculates the quantile 'q' based on the given histogram.
//
// The quantile value is interpolated assuming a linear distribution within a
// bucket.
// TODO(beorn7): Find an interpolation method that is a better fit for
// exponential buckets (and think about configurable interpolation).
// For custom buckets, the result is interpolated linearly, i.e. it is assumed
// the observations are uniformly distributed within each bucket. (This is
// quite a blunt assumption, but it is consistent with the interpolation method
// used for classic histograms so far.)
//
// For exponential buckets, the interpolation is done under the assumption that
// the samples within each bucket are distributed in a way that they would
// uniformly populate the buckets in a hypothetical histogram with higher
// resolution. For example, if the rank calculation suggests that the requested
// quantile is right in the middle of the population of the (1,2] bucket, we
// assume the quantile would be right at the bucket boundary between the two
// buckets the (1,2] bucket would be divided into if the histogram had double
// the resolution, which is 2**2**-1 = 1.4142... We call this exponential
// interpolation.
//
// However, for a quantile that ends up in the zero bucket, this method isn't
// very helpful (because there is an infinite number of buckets close to zero,
// so we would have to assume zero as the result). Therefore, we return to
// linear interpolation in the zero bucket.
//
// A natural lower bound of 0 is assumed if the histogram has only positive
// buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has
// only negative buckets.
// TODO(beorn7): Come to terms if we want that.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
// There are a number of special cases:
//
// If the histogram has 0 observations, NaN is returned.
//
@ -193,9 +205,9 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
rank float64
)
// if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator
// if the q < 0.5, use the forward iterator
// if the q >= 0.5, use the reverse iterator
// If there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator.
// If q < 0.5, use the forward iterator.
// If q >= 0.5, use the reverse iterator.
if math.IsNaN(h.Sum) || q < 0.5 {
it = h.AllBucketIterator()
rank = q * h.Count
@ -260,8 +272,29 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
rank = count - rank
}
// TODO(codesome): Use a better estimation than linear.
return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count)
// The fraction of how far we are into the current bucket.
fraction := rank / bucket.Count
// Return linear interpolation for custom buckets and for quantiles that
// end up in the zero bucket.
if h.UsesCustomBuckets() || (bucket.Lower <= 0 && bucket.Upper >= 0) {
return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction
}
// For exponential buckets, we interpolate on a logarithmic scale. On a
// logarithmic scale, the exponential bucket boundaries (for any schema)
// become linear (every bucket has the same width). Therefore, after
// taking the logarithm of both bucket boundaries, we can use the
// calculated fraction in the same way as for linear interpolation (see
// above). Finally, we return to the normal scale by applying the
// exponential function to the result.
logLower := math.Log2(math.Abs(bucket.Lower))
logUpper := math.Log2(math.Abs(bucket.Upper))
if bucket.Lower > 0 { // Positive bucket.
return math.Exp2(logLower + (logUpper-logLower)*fraction)
}
// Otherwise, we are in a negative bucket and have to mirror things.
return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction))
}
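A quick numeric check (with assumed bucket values, not taken from the code above) that the mirrored branch is symmetric to the positive one:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Hypothetical negative bucket (-2, -1], with the rank halfway into it.
	lower, upper, fraction := -2.0, -1.0, 0.5
	logLower := math.Log2(math.Abs(lower)) // 1
	logUpper := math.Log2(math.Abs(upper)) // 0
	v := -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction))
	fmt.Println(v) // -1.4142135623730951, the mirror image of the positive case
}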
// histogramFraction calculates the fraction of observations between the
@ -271,8 +304,8 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 {
// histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h)
// returns 0.9.
//
// The same notes (and TODOs) with regard to interpolation and assumptions about
// the zero bucket boundaries apply as for histogramQuantile.
// The same notes with regard to interpolation and assumptions about the zero
// bucket boundaries apply as for histogramQuantile.
//
// Whether either boundary is inclusive or exclusive doesn't actually matter as
// long as interpolation has to be performed anyway. In the case of a boundary
@ -310,7 +343,35 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
)
for it.Next() {
b := it.At()
if b.Lower < 0 && b.Upper > 0 {
zeroBucket := false
// interpolateLinearly is used for custom buckets to be
// consistent with the linear interpolation known from classic
// histograms. It is also used for the zero bucket.
interpolateLinearly := func(v float64) float64 {
return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower)
}
// interpolateExponentially uses the same exponential
// interpolation method as histogramQuantile above. This
// method is a better fit for exponential bucketing.
interpolateExponentially := func(v float64) float64 {
var (
logLower = math.Log2(math.Abs(b.Lower))
logUpper = math.Log2(math.Abs(b.Upper))
logV = math.Log2(math.Abs(v))
fraction float64
)
if v > 0 {
fraction = (logV - logLower) / (logUpper - logLower)
} else {
fraction = 1 - ((logV - logUpper) / (logLower - logUpper))
}
return rank + b.Count*fraction
}
if b.Lower <= 0 && b.Upper >= 0 {
zeroBucket = true
switch {
case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0:
// This is the zero bucket and the histogram has only
@ -325,10 +386,12 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
}
}
if !lowerSet && b.Lower >= lower {
// We have hit the lower value at the lower bucket boundary.
lowerRank = rank
lowerSet = true
}
if !upperSet && b.Lower >= upper {
// We have hit the upper value at the lower bucket boundary.
upperRank = rank
upperSet = true
}
@ -336,11 +399,21 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6
break
}
if !lowerSet && b.Lower < lower && b.Upper > lower {
lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower)
// The lower value is in this bucket.
if h.UsesCustomBuckets() || zeroBucket {
lowerRank = interpolateLinearly(lower)
} else {
lowerRank = interpolateExponentially(lower)
}
lowerSet = true
}
if !upperSet && b.Lower < upper && b.Upper > upper {
upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower)
// The upper value is in this bucket.
if h.UsesCustomBuckets() || zeroBucket {
upperRank = interpolateLinearly(upper)
} else {
upperRank = interpolateExponentially(upper)
}
upperSet = true
}
if lowerSet && upperSet {


@ -1859,13 +1859,6 @@ func BenchmarkBuildWriteRequest(b *testing.B) {
}
pBuf := proto.NewBuffer(nil)
// Warmup buffers
for i := 0; i < 10; i++ {
populateTimeSeries(batch, seriesBuff, true, true)
buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy")
}
b.ResetTimer()
totalSize := 0
for i := 0; i < b.N; i++ {
populateTimeSeries(batch, seriesBuff, true, true)
@ -1897,45 +1890,43 @@ func BenchmarkBuildWriteRequest(b *testing.B) {
func BenchmarkBuildV2WriteRequest(b *testing.B) {
noopLogger := log.NewNopLogger()
type testcase struct {
batch []timeSeries
}
testCases := []testcase{
{createDummyTimeSeries(2)},
{createDummyTimeSeries(10)},
{createDummyTimeSeries(100)},
}
for _, tc := range testCases {
bench := func(b *testing.B, batch []timeSeries) {
symbolTable := writev2.NewSymbolTable()
buff := make([]byte, 0)
seriesBuff := make([]writev2.TimeSeries, len(tc.batch))
seriesBuff := make([]writev2.TimeSeries, len(batch))
for i := range seriesBuff {
seriesBuff[i].Samples = []writev2.Sample{{}}
seriesBuff[i].Exemplars = []writev2.Exemplar{{}}
}
pBuf := []byte{}
// Warmup buffers
for i := 0; i < 10; i++ {
populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true)
buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
}
b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
totalSize := 0
for j := 0; j < b.N; j++ {
populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true)
b.ResetTimer()
req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
if err != nil {
b.Fatal(err)
}
symbolTable.Reset()
totalSize += len(req)
b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
totalSize := 0
for i := 0; i < b.N; i++ {
populateV2TimeSeries(&symbolTable, batch, seriesBuff, true, true)
req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
if err != nil {
b.Fatal(err)
}
})
totalSize += len(req)
b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
}
}
twoBatch := createDummyTimeSeries(2)
tenBatch := createDummyTimeSeries(10)
hundredBatch := createDummyTimeSeries(100)
b.Run("2 instances", func(b *testing.B) {
bench(b, twoBatch)
})
b.Run("10 instances", func(b *testing.B) {
bench(b, tenBatch)
})
b.Run("1k instances", func(b *testing.B) {
bench(b, hundredBatch)
})
}
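The restructuring above follows Go's standard sub-benchmark pattern: one shared bench closure plus a b.Run call per input size, so each size gets its own timing and its own compressedSize/op metric. A self-contained sketch of the same shape (illustrative names only, not the Prometheus helpers):

package bench_test

import (
	"bytes"
	"testing"
)

func BenchmarkConcatBatch(b *testing.B) {
	bench := func(b *testing.B, batch [][]byte) {
		var buf bytes.Buffer
		b.ResetTimer()
		for i := 0; i < b.N; i++ {
			buf.Reset()
			for _, ts := range batch {
				buf.Write(ts)
			}
		}
	}
	makeBatch := func(n int) [][]byte {
		batch := make([][]byte, n)
		for i := range batch {
			batch[i] = make([]byte, 128)
		}
		return batch
	}
	b.Run("2 instances", func(b *testing.B) { bench(b, makeBatch(2)) })
	b.Run("10 instances", func(b *testing.B) { bench(b, makeBatch(10)) })
	b.Run("100 instances", func(b *testing.B) { bench(b, makeBatch(100)) })
}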
func TestDropOldTimeSeries(t *testing.T) {


@ -254,6 +254,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc
return nil, err
}
its = append(its, allPostings)
case m.Type == labels.MatchRegexp && m.Value == ".*":
// .* regexp matches any string: do nothing.
case m.Type == labels.MatchNotRegexp && m.Value == ".*":
return index.EmptyPostings(), nil
case labelMustBeSet[m.Name]:
// If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("")
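The shortcut is sound because ".*" accepts every possible label value, including the empty string, so its negation can never match anything. (This assumes the matcher anchors its pattern as ^(?s:.*)$ so that "." also matches newlines; the "\n" label values in the tests below exercise exactly that.) A minimal sketch using the model/labels matcher:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	matchAll := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")
	matchNone := labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")

	for _, v := range []string{"", "a", "a\nb"} {
		// i=~".*" filters nothing out; i!~".*" can never match.
		fmt.Printf("%q: %v %v\n", v, matchAll.Matches(v), matchNone.Matches(v)) // true false
	}
}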


@ -2689,6 +2689,7 @@ func TestPostingsForMatchers(t *testing.T) {
app.Append(0, labels.FromStrings("n", "1"), 0, 0)
app.Append(0, labels.FromStrings("n", "1", "i", "a"), 0, 0)
app.Append(0, labels.FromStrings("n", "1", "i", "b"), 0, 0)
app.Append(0, labels.FromStrings("n", "1", "i", "\n"), 0, 0)
app.Append(0, labels.FromStrings("n", "2"), 0, 0)
app.Append(0, labels.FromStrings("n", "2.5"), 0, 0)
require.NoError(t, app.Commit())
@ -2704,6 +2705,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2722,6 +2724,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
@ -2739,6 +2742,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2750,6 +2754,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2757,6 +2762,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
// Regex.
@ -2766,6 +2772,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2801,6 +2808,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2808,6 +2816,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
// Not regex.
@ -2816,6 +2825,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2849,12 +2859,14 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a?$")},
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2862,6 +2874,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2895,6 +2908,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
},
},
@ -2942,6 +2956,57 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "2.5"),
},
},
// Test shortcut for i=~".*"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
// Test shortcut for n=~".*" and i=~"^.*$"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
// Test shortcut for n=~"^.*$"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchEqual, "i", "a")},
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
},
},
// Test shortcut for i!~".*"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")},
exp: []labels.Labels{},
},
// Test shortcut for n!~"^.*$", i!~".*". First one triggers empty result.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")},
exp: []labels.Labels{},
},
// Test shortcut i!~".*"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")},
exp: []labels.Labels{},
},
// Test shortcut i!~"^.*$"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")},
exp: []labels.Labels{},
},
}
ir, err := h.Index()

ui-commits (new file, 12 lines)

@ -0,0 +1,12 @@
dfec29d8e Fix border color for target pools with one target that is failing
65743bf9b ui: drop template readme
a7c1a951d Add general Mantine overrides CSS file
0757fbbec Make sure that alert element table headers are not wrapped
0180cf31a Factor out common icon and card styles
50af7d589 Fix tree line drawing by using a callback ref
ac01dc903 Explain, vector-to-vector: Do not compute results for set operators
9b0dc68d0 PromQL explain view: Support set operators
57898c792 Refactor and fix time formatting functions, add tests
091fc403c Fiddle with targets table styles to try and improve things a bit
a1908df92 Don't wrap action buttons below metric name in metrics explorer
ac5377873 mantine UI: Distinguish between Not Ready and Stopping


@ -1,17 +1,23 @@
import { FC, PropsWithChildren, useEffect, useState } from "react";
import { IconAlertTriangle } from "@tabler/icons-react";
import { useAppDispatch } from "../state/hooks";
import { updateSettings, useSettings } from "../state/settingsSlice";
import { useSuspenseAPIQuery } from "../api/api";
import { WALReplayStatus } from "../api/responseTypes/walreplay";
import { Progress, Stack, Title } from "@mantine/core";
import { Progress, Alert } from "@mantine/core";
import { useSuspenseQuery } from "@tanstack/react-query";
const STATUS_STARTING = "is starting up...";
const STATUS_STOPPING = "is shutting down...";
const STATUS_LOADING = "is not ready...";
const ReadinessLoader: FC = () => {
const { pathPrefix } = useSettings();
const { pathPrefix, agentMode } = useSettings();
const dispatch = useAppDispatch();
// Query key is incremented every second to retrigger the status fetching.
const [queryKey, setQueryKey] = useState(0);
const [statusMessage, setStatusMessage] = useState("");
// Query readiness status.
const { data: ready } = useSuspenseQuery<boolean>({
@ -28,8 +34,16 @@ const ReadinessLoader: FC = () => {
});
switch (res.status) {
case 200:
setStatusMessage(""); // Clear any status message when ready.
return true;
case 503:
// Check the custom header `X-Prometheus-Stopping` for stopping information.
if (res.headers.get("X-Prometheus-Stopping") === "true") {
setStatusMessage(STATUS_STOPPING);
} else {
setStatusMessage(STATUS_STARTING);
}
return false;
default:
throw new Error(res.statusText);
@ -40,14 +54,16 @@ const ReadinessLoader: FC = () => {
},
});
// Query WAL replay status.
// Only call WAL replay status API if the service is starting up.
const shouldQueryWALReplay = statusMessage === STATUS_STARTING;
const {
data: {
data: { min, max, current },
},
data: walData,
isSuccess: walSuccess,
} = useSuspenseAPIQuery<WALReplayStatus>({
path: "/status/walreplay",
key: ["walreplay", queryKey],
enabled: shouldQueryWALReplay, // Only enabled when service is starting up.
});
useEffect(() => {
@ -62,21 +78,28 @@ const ReadinessLoader: FC = () => {
}, []);
return (
<Stack gap="lg" maw={1000} mx="auto" mt="xs">
<Title order={2}>Starting up...</Title>
{max > 0 && (
<Alert
color="yellow"
title={"Prometheus " + (agentMode && "Agent "||"") + (statusMessage || STATUS_LOADING)}
icon={<IconAlertTriangle/>}
maw={500}
mx="auto"
mt="lg"
>
{shouldQueryWALReplay && walSuccess && walData && (
<>
<p>
Replaying WAL ({current}/{max})
</p>
<strong>
Replaying WAL ({walData.data.current}/{walData.data.max})
</strong>
<Progress
size="xl"
animated
value={((current - min + 1) / (max - min + 1)) * 100}
color="yellow"
value={((walData.data.current - walData.data.min + 1) / (walData.data.max - walData.data.min + 1)) * 100}
/>
</>
)}
</Stack>
</Alert>
);
};


@ -380,10 +380,11 @@ export const getUPlotOptions = (
hooks: {
setSelect: [
(self: uPlot) => {
onSelectRange(
self.posToVal(self.select.left, "x"),
self.posToVal(self.select.left + self.select.width, "x")
);
// Disallow sub-second zooming, as it causes inconsistencies in the X axis in uPlot.
const leftVal = self.posToVal(self.select.left, "x");
const rightVal = Math.max(self.posToVal(self.select.left + self.select.width, "x"), leftVal + 1);
onSelectRange(leftVal, rightVal);
},
],
},


@ -1277,17 +1277,17 @@ const funcDocs: Record<string, React.ReactNode> = {
</p>
</>
),
holt_winters: (
double_exponential_smoothing: (
<>
<p>
<code>holt_winters(v range-vector, sf scalar, tf scalar)</code> produces a smoothed value for time series based on
<code>double_exponential_smoothing(v range-vector, sf scalar, tf scalar)</code> produces a smoothed value for time series based on
the range in <code>v</code>. The lower the smoothing factor <code>sf</code>, the more importance is given to old
data. The higher the trend factor <code>tf</code>, the more trends in the data is considered. Both <code>sf</code>{' '}
and <code>tf</code> must be between 0 and 1.
</p>
<p>
<code>holt_winters</code> should only be used with gauges.
<code>double_exponential_smoothing</code> should only be used with gauges.
</p>
</>
),


@ -17,7 +17,7 @@ export const functionArgNames: Record<string, string[]> = {
// exp: [],
// floor: [],
histogram_quantile: ['target quantile', 'histogram'],
holt_winters: ['input series', 'smoothing factor', 'trend factor'],
double_exponential_smoothing: ['input series', 'smoothing factor', 'trend factor'],
hour: ['timestamp (default = vector(time()))'],
// idelta: [],
// increase: [],
@ -68,7 +68,7 @@ export const functionDescriptions: Record<string, string> = {
exp: 'calculate exponential function for input vector values',
floor: 'round down values of input series to nearest integer',
histogram_quantile: 'calculate quantiles from histogram buckets',
holt_winters: 'calculate smoothed value of input series',
double_exponential_smoothing: 'calculate smoothed value of input series',
hour: 'return the hour of the day for provided timestamps',
idelta: 'calculate the difference between the last two samples of a range vector (for counters)',
increase: 'calculate the increase in value over a range of time (for counters)',


@ -60,8 +60,8 @@ export const functionSignatures: Record<string, Func> = {
histogram_stddev: { name: 'histogram_stddev', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
histogram_stdvar: { name: 'histogram_stdvar', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
histogram_sum: { name: 'histogram_sum', argTypes: [valueType.vector], variadic: 0, returnType: valueType.vector },
holt_winters: {
name: 'holt_winters',
double_exponential_smoothing: {
name: 'double_exponential_smoothing',
argTypes: [valueType.matrix, valueType.scalar, valueType.scalar],
variadic: 0,
returnType: valueType.vector,


@ -583,12 +583,42 @@ describe('analyzeCompletion test', () => {
pos: 5,
expectedContext: [{ kind: ContextKind.AtModifiers }],
},
{
title: 'autocomplete topk params',
expr: 'topk()',
pos: 5,
expectedContext: [{ kind: ContextKind.Number }],
},
{
title: 'autocomplete topk params 2',
expr: 'topk(inf,)',
pos: 9,
expectedContext: [{ kind: ContextKind.MetricName, metricName: '' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }],
},
{
title: 'autocomplete topk params 3',
expr: 'topk(inf,r)',
pos: 10,
expectedContext: [{ kind: ContextKind.MetricName, metricName: 'r' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }],
},
{
title: 'autocomplete topk params 4',
expr: 'topk by(instance) ()',
pos: 19,
expectedContext: [{ kind: ContextKind.Number }],
},
{
title: 'autocomplete topk params 5',
expr: 'topk by(instance) (inf,r)',
pos: 24,
expectedContext: [{ kind: ContextKind.MetricName, metricName: 'r' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation }],
},
];
testCases.forEach((value) => {
it(value.title, () => {
const state = createEditorState(value.expr);
const node = syntaxTree(state).resolve(value.pos, -1);
const result = analyzeCompletion(state, node);
const result = analyzeCompletion(state, node, value.pos);
expect(value.expectedContext).toEqual(result);
});
});


@ -54,6 +54,12 @@ import {
QuotedLabelName,
NumberDurationLiteralInDurationContext,
NumberDurationLiteral,
AggregateOp,
Topk,
Bottomk,
LimitK,
LimitRatio,
CountValues,
} from '@prometheus-io/lezer-promql';
import { Completion, CompletionContext, CompletionResult } from '@codemirror/autocomplete';
import { EditorState } from '@codemirror/state';
@ -185,7 +191,7 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod
if (node.type.id === LabelMatchers || node.type.id === GroupingLabels) {
start = computeStartCompleteLabelPositionInLabelMatcherOrInGroupingLabel(node, pos);
} else if (
node.type.id === FunctionCallBody ||
(node.type.id === FunctionCallBody && node.firstChild === null) ||
(node.type.id === StringLiteral && (node.parent?.type.id === UnquotedLabelMatcher || node.parent?.type.id === QuotedLabelMatcher))
) {
// When the cursor is between brackets or quotes, we need to increment the starting position to avoid considering the open bracket / first string.
@ -198,6 +204,7 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod
// So we have to analyze the string around the current node to see if the duration unit is already present or not.
(node.type.id === NumberDurationLiteralInDurationContext && !durationTerms.map((v) => v.label).includes(currentText[currentText.length - 1])) ||
(node.type.id === NumberDurationLiteral && node.parent?.type.id === 0 && node.parent.parent?.type.id === SubqueryExpr) ||
(node.type.id === FunctionCallBody && isAggregatorWithParam(node) && node.firstChild !== null) ||
(node.type.id === 0 &&
(node.parent?.type.id === OffsetExpr ||
node.parent?.type.id === MatrixSelector ||
@ -208,10 +215,21 @@ export function computeStartCompletePosition(state: EditorState, node: SyntaxNod
return start;
}
function isAggregatorWithParam(functionCallBody: SyntaxNode): boolean {
const parent = functionCallBody.parent;
if (parent !== null && parent.firstChild?.type.id === AggregateOp) {
const aggregationOpType = parent.firstChild.firstChild;
if (aggregationOpType !== null && [Topk, Bottomk, LimitK, LimitRatio, CountValues].includes(aggregationOpType.type.id)) {
return true;
}
}
return false;
}
// analyzeCompletion determines what should be autocompleted.
// The value of the autocompletion is then calculated by the function buildCompletion.
// Note: this method is exported for testing purposes only. Do not use it directly.
export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context[] {
export function analyzeCompletion(state: EditorState, node: SyntaxNode, pos: number): Context[] {
const result: Context[] = [];
switch (node.type.id) {
case 0: // 0 is the id of the error node
@ -330,7 +348,7 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
}
// now we have to know if we have two Expr in the direct children of the `parent`
const containExprTwice = containsChild(parent, 'Expr', 'Expr');
if (containExprTwice) {
if (containExprTwice && parent.type.id !== FunctionCallBody) {
if (parent.type.id === BinaryExpr && !containsAtLeastOneChild(parent, 0)) {
// We are likely in the case 1 or 5
result.push(
@ -460,7 +478,23 @@ export function analyzeCompletion(state: EditorState, node: SyntaxNode): Context
result.push({ kind: ContextKind.Duration });
break;
case FunctionCallBody:
// In this case we are in the given situation:
// For aggregation functions such as topk, the first parameter is a number,
// while the second one is an expression.
// When moving to the second parameter, the node is an error node.
// Unfortunately, codemirror doesn't give us the error node as the current node, but the enclosing FunctionCallBody.
// The tree looks like this: PromQL(AggregateExpr(AggregateOp(Topk),FunctionCallBody(NumberDurationLiteral,⚠)))
// So we need to figure out whether the cursor is on the first parameter or on the second.
if (isAggregatorWithParam(node)) {
if (node.firstChild === null || (node.firstChild.from <= pos && node.firstChild.to >= pos)) {
// Either the FunctionCallBody has no child yet, or the cursor is still within the first parameter, so we autocomplete the first parameter.
result.push({ kind: ContextKind.Number });
break;
}
// At this point we are necessarily autocompleting the second parameter.
result.push({ kind: ContextKind.MetricName, metricName: '' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation });
break;
}
// In all other cases, we are in the given situation:
// sum() or in rate()
// with the cursor between the bracket. So we can autocomplete the metric, the function and the aggregation.
result.push({ kind: ContextKind.MetricName, metricName: '' }, { kind: ContextKind.Function }, { kind: ContextKind.Aggregation });
@ -516,7 +550,11 @@ export class HybridComplete implements CompleteStrategy {
promQL(context: CompletionContext): Promise<CompletionResult | null> | CompletionResult | null {
const { state, pos } = context;
const tree = syntaxTree(state).resolve(pos, -1);
const contexts = analyzeCompletion(state, tree);
// The commented lines below can help you print the current lezer tree.
// That's useful when you are trying to understand why something doesn't autocomplete.
// console.log(syntaxTree(state).topNode.toString());
// console.log(`current node: ${tree.type.name}`);
const contexts = analyzeCompletion(state, tree, pos);
let asyncResult: Promise<Completion[]> = Promise.resolve([]);
let completeSnippet = false;
let span = true;


@ -258,7 +258,7 @@ export const functionIdentifierTerms = [
type: 'function',
},
{
label: 'holt_winters',
label: 'double_exponential_smoothing',
detail: 'function',
info: 'Calculate smoothed value of input series',
type: 'function',


@ -46,7 +46,7 @@ import {
HistogramStdDev,
HistogramStdVar,
HistogramSum,
HoltWinters,
DoubleExponentialSmoothing,
Hour,
Idelta,
Increase,
@ -312,8 +312,8 @@ const promqlFunctions: { [key: number]: PromQLFunction } = {
variadic: 0,
returnType: ValueType.vector,
},
[HoltWinters]: {
name: 'holt_winters',
[DoubleExponentialSmoothing]: {
name: 'double_exponential_smoothing',
argTypes: [ValueType.matrix, ValueType.scalar, ValueType.scalar],
variadic: 0,
returnType: ValueType.vector,


@ -20,7 +20,7 @@ export const promQLHighLight = styleTags({
NumberDurationLiteral: tags.number,
NumberDurationLiteralInDurationContext: tags.number,
Identifier: tags.variableName,
'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramAvg HistogramCount HistogramFraction HistogramQuantile HistogramSum HoltWinters Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year':
'Abs Absent AbsentOverTime Acos Acosh Asin Asinh Atan Atanh AvgOverTime Ceil Changes Clamp ClampMax ClampMin Cos Cosh CountOverTime DaysInMonth DayOfMonth DayOfWeek DayOfYear Deg Delta Deriv Exp Floor HistogramAvg HistogramCount HistogramFraction HistogramQuantile HistogramSum DoubleExponentialSmoothing Hour Idelta Increase Irate LabelReplace LabelJoin LastOverTime Ln Log10 Log2 MaxOverTime MinOverTime Minute Month Pi PredictLinear PresentOverTime QuantileOverTime Rad Rate Resets Round Scalar Sgn Sin Sinh Sort SortDesc SortByLabel SortByLabelDesc Sqrt StddevOverTime StdvarOverTime SumOverTime Tan Tanh Time Timestamp Vector Year':
tags.function(tags.variableName),
'Avg Bottomk Count Count_values Group LimitK LimitRatio Max Min Quantile Stddev Stdvar Sum Topk': tags.operatorKeyword,
'By Without Bool On Ignoring GroupLeft GroupRight Offset Start End': tags.modifier,


@ -141,7 +141,7 @@ FunctionIdentifier {
HistogramStdVar |
HistogramSum |
HistogramAvg |
HoltWinters |
DoubleExponentialSmoothing |
Hour |
Idelta |
Increase |
@ -388,7 +388,7 @@ NumberDurationLiteralInDurationContext {
HistogramStdDev { condFn<"histogram_stddev"> }
HistogramStdVar { condFn<"histogram_stdvar"> }
HistogramSum { condFn<"histogram_sum"> }
HoltWinters { condFn<"holt_winters"> }
DoubleExponentialSmoothing { condFn<"double_exponential_smoothing"> }
Hour { condFn<"hour"> }
Idelta { condFn<"idelta"> }
Increase { condFn<"increase"> }


@ -102,6 +102,14 @@ var newUIReactRouterServerPaths = []string{
"/tsdb-status",
}
type ReadyStatus uint32
const (
NotReady ReadyStatus = iota
Ready
Stopping
)
// withStackTrace logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
@ -331,7 +339,7 @@ func New(logger log.Logger, o *Options) *Handler {
now: model.Now,
}
h.SetReady(false)
h.SetReady(NotReady)
factorySPr := func(_ context.Context) api_v1.ScrapePoolsRetriever { return h.scrapeManager }
factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager }
@ -572,30 +580,39 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
}
// SetReady sets the ready status of our web Handler.
func (h *Handler) SetReady(v bool) {
if v {
h.ready.Store(1)
func (h *Handler) SetReady(v ReadyStatus) {
if v == Ready {
h.ready.Store(uint32(Ready))
h.metrics.readyStatus.Set(1)
return
}
h.ready.Store(0)
h.ready.Store(uint32(v))
h.metrics.readyStatus.Set(0)
}
// Verifies whether the server is ready or not.
func (h *Handler) isReady() bool {
return h.ready.Load() > 0
return ReadyStatus(h.ready.Load()) == Ready
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if h.isReady() {
switch ReadyStatus(h.ready.Load()) {
case Ready:
f(w, r)
} else {
case NotReady:
w.Header().Set("X-Prometheus-Stopping", "false")
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
case Stopping:
w.Header().Set("X-Prometheus-Stopping", "true")
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
default:
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Unknown state")
}
}
}
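From a client's perspective, the new header lets an orchestrator distinguish a starting instance (worth waiting for) from a stopping one (not worth waiting for). A hedged sketch of such a readiness probe, using only the endpoint and header introduced above:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical probe against a local Prometheus instance.
	resp, err := http.Get("http://localhost:9090/-/ready")
	if err != nil {
		fmt.Println("unreachable:", err)
		return
	}
	defer resp.Body.Close()

	switch {
	case resp.StatusCode == http.StatusOK:
		fmt.Println("ready")
	case resp.Header.Get("X-Prometheus-Stopping") == "true":
		fmt.Println("shutting down; stop waiting for readiness")
	default:
		fmt.Println("still starting up; retry")
	}
}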


@ -156,7 +156,7 @@ func TestReadyAndHealthy(t *testing.T) {
cleanupTestResponse(t, resp)
// Set to ready.
webHandler.SetReady(true)
webHandler.SetReady(Ready)
for _, u := range []string{
baseURL + "/-/healthy",
@ -260,7 +260,7 @@ func TestRoutePrefix(t *testing.T) {
cleanupTestResponse(t, resp)
// Set to ready.
webHandler.SetReady(true)
webHandler.SetReady(Ready)
resp, err = http.Get(baseURL + opts.RoutePrefix + "/-/healthy")
require.NoError(t, err)
@ -307,7 +307,7 @@ func TestDebugHandler(t *testing.T) {
},
}
handler := New(nil, opts)
handler.SetReady(true)
handler.SetReady(Ready)
w := httptest.NewRecorder()
@ -349,7 +349,7 @@ func TestHTTPMetrics(t *testing.T) {
counter := handler.metrics.requestCounter
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable)))))
handler.SetReady(true)
handler.SetReady(Ready)
for range [2]int{} {
code = getReady()
require.Equal(t, http.StatusOK, code)
@ -358,7 +358,7 @@ func TestHTTPMetrics(t *testing.T) {
require.Equal(t, 2, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusOK)))))
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable)))))
handler.SetReady(false)
handler.SetReady(NotReady)
for range [2]int{} {
code = getReady()
require.Equal(t, http.StatusServiceUnavailable, code)
@ -537,7 +537,7 @@ func TestAgentAPIEndPoints(t *testing.T) {
opts.Flags = map[string]string{}
webHandler := New(nil, opts)
webHandler.SetReady(true)
webHandler.SetReady(Ready)
webHandler.config = &config.Config{}
webHandler.notifier = &notifier.Manager{}
l, err := webHandler.Listeners()
@ -692,7 +692,7 @@ func TestMultipleListenAddresses(t *testing.T) {
time.Sleep(5 * time.Second)
// Set to ready.
webHandler.SetReady(true)
webHandler.SetReady(Ready)
for _, port := range []string{port1, port2} {
baseURL := "http://localhost" + port