Merge branch 'prometheus:main' into patch-1

Odysseus Zhang 2024-07-15 10:51:51 +08:00 committed by GitHub
commit 4c74d359eb
59 changed files with 1999 additions and 1109 deletions


@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@ -13,7 +13,7 @@ jobs:
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
- uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11


@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
with:
sarif_file: results.sarif


@ -2,6 +2,14 @@
## unreleased
## 2.53.1 / 2024-07-10
Fix a bug which would drop samples in remote-write if the sending flow stalled
for longer than it takes to write one "WAL segment". How long this takes depends on the size
of your Prometheus; as a rough guide with 10 million series it is about 2-3 minutes.
* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
## 2.53.0 / 2024-06-16
This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory, when the upstream Go default of 100 is lowered to 75.
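Operators who prefer the previous trade-off can still override the new default, since the Go runtime honors the `GOGC` environment variable (a minimal sketch; the flag and paths are illustrative):

```
GOGC=100 ./prometheus --config.file=prometheus.yml
```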


@ -57,7 +57,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@ -1 +1 @@
2.53.0
2.53.1


@ -260,7 +260,7 @@ URL query parameters:
series to return. At least one `match[]` argument must be provided.
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
- `limit=<number>`: Maximum number of returned series. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
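For example, a request capping the response at two matching series (the selector `up` is illustrative):

```
$ curl 'http://localhost:9090/api/v1/series?match[]=up&limit=2'
```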
You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
@ -311,7 +311,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
series from which to read the label names. Optional.
- `limit=<number>`: Maximum number of returned series. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label names.
@ -362,7 +362,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
series from which to read the label values. Optional.
- `limit=<number>`: Maximum number of returned series. Optional.
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
The `data` section of the JSON response is a list of string label values.
@ -694,6 +694,7 @@ URL query parameters:
- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
```json
$ curl http://localhost:9090/api/v1/rules


@ -82,6 +82,16 @@ Examples:
-Inf
NaN
As of version 2.54, float literals can also be written using the syntax of time durations, where the time duration is converted into a float value equal to the number of seconds it represents. This is an experimental feature and might still change.
Examples:
1s # Equivalent to 1.0
2m # Equivalent to 120.0
1ms # Equivalent to 0.001
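Because such literals are ordinary float values, they can be used wherever a scalar is expected, for example (the metric name is illustrative):

process_uptime_seconds > 1h # Equivalent to process_uptime_seconds > 3600.0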
## Time series selectors
Time series selectors are responsible for selecting the time series and raw or inferred sample timestamps and values.
@ -224,6 +234,15 @@ Here are some examples of valid time durations:
5m
10s
As of version 2.54, time durations can also be written using the syntax of float literals, interpreted as the number of seconds of the time duration. This is an experimental feature and might still change.
Examples:
1.0 # Equivalent to 1s
0.001 # Equivalent to 1ms
120 # Equivalent to 2m
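Conversely, a bare number is accepted wherever a duration is expected, e.g. in a range selector (the metric name is illustrative; the test changes later in this commit exercise exactly this form):

rate(http_requests_total[300]) # Equivalent to rate(http_requests_total[5m])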
### Offset modifier
The `offset` modifier allows changing the time offset for individual


@ -98,8 +98,9 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.
Special cases:
- Return an empty vector if `min > max`
- Return `NaN` if `min` or `max` is `NaN`
* Return an empty vector if `min > max`
* Return `NaN` if `min` or `max` is `NaN`
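For example (the metric name is illustrative):

clamp(cpu_utilization_ratio, 0, 1) # every returned sample value lies within [0, 1]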
## `clamp_max()`
@ -349,8 +350,8 @@ a histogram.
Buckets of classic histograms are cumulative. Therefore, the following should always be the case:
- The counts in the buckets are monotonically increasing (strictly non-decreasing).
- A lack of observations between the upper limits of two consecutive buckets results in equal counts
* The counts in the buckets are monotonically increasing (strictly non-decreasing).
* A lack of observations between the upper limits of two consecutive buckets results in equal counts
in those two buckets.
However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets
@ -692,21 +693,21 @@ ignore histogram samples.
The trigonometric functions work in radians:
- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
The following are useful for converting between degrees and radians:
- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
- `pi()`: returns pi.
- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
* `pi()`: returns pi.
* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
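For example, because `pi()` returns a scalar, it can be fed to `deg()` via `vector()` (an illustrative round trip):

deg(vector(pi())) # a single series {} with value 180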


@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
go 1.21
go 1.21.0
require (
github.com/alecthomas/kingpin/v2 v2.4.0

go.mod

@ -1,6 +1,8 @@
module github.com/prometheus/prometheus
go 1.21
go 1.21.0
toolchain go1.22.5
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
@ -60,8 +62,8 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v1.8.0
go.opentelemetry.io/collector/semconv v0.101.0
go.opentelemetry.io/collector/pdata v1.11.0
go.opentelemetry.io/collector/semconv v0.104.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
@ -83,7 +85,7 @@ require (
google.golang.org/api v0.183.0
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
google.golang.org/grpc v1.64.0
google.golang.org/protobuf v1.34.1
google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3

go.sum

@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -723,10 +723,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE=
go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE=
go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k=
go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
@ -1119,8 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -86,7 +86,6 @@ func (h Histogram) IsFloatHistogram() bool {
// ToIntHistogram returns integer Prometheus histogram from the remote implementation
// of integer histogram. If it's a float histogram, the method returns nil.
// TODO(bwplotka): Add support for incoming NHCB.
func (h Histogram) ToIntHistogram() *histogram.Histogram {
if h.IsFloatHistogram() {
return nil
@ -102,13 +101,13 @@ func (h Histogram) ToIntHistogram() *histogram.Histogram {
PositiveBuckets: h.GetPositiveDeltas(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeDeltas(),
CustomValues: h.GetCustomValues(),
}
}
// ToFloatHistogram returns float Prometheus histogram from the remote implementation
// of float histogram. If the underlying implementation is an integer histogram, a
// conversion is performed.
// TODO(bwplotka): Add support for incoming NHCB.
func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
if h.IsFloatHistogram() {
return &histogram.FloatHistogram{
@ -122,6 +121,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
PositiveBuckets: h.GetPositiveCounts(),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: h.GetNegativeCounts(),
CustomValues: h.GetCustomValues(),
}
}
// Conversion from integer histogram.
@ -136,6 +136,7 @@ func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()),
NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()),
NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()),
CustomValues: h.GetCustomValues(),
}
}
@ -171,6 +172,7 @@ func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram {
PositiveSpans: spansToSpansProto(h.PositiveSpans),
PositiveDeltas: h.PositiveBuckets,
ResetHint: Histogram_ResetHint(h.CounterResetHint),
CustomValues: h.CustomValues,
Timestamp: timestamp,
}
}
@ -188,6 +190,7 @@ func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram
PositiveSpans: spansToSpansProto(fh.PositiveSpans),
PositiveCounts: fh.PositiveBuckets,
ResetHint: Histogram_ResetHint(fh.CounterResetHint),
CustomValues: fh.CustomValues,
Timestamp: timestamp,
}
}


@ -144,10 +144,12 @@ func TestToHistogram_Empty(t *testing.T) {
})
}
// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testIntHistogram() histogram.Histogram {
return histogram.Histogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Schema: 1,
Count: 19,
Sum: 2.7,
ZeroThreshold: 1e-128,
@ -163,13 +165,16 @@ func testIntHistogram() histogram.Histogram {
{Offset: 0, Length: 1},
},
NegativeBuckets: []int64{1, 2, -2, 1, -1, 0},
CustomValues: []float64{21421, 523},
}
}
// NOTE(bwplotka): This is technically not a valid histogram, but it represents
// important cases to test when copying or converting to/from int/float histograms.
func testFloatHistogram() histogram.FloatHistogram {
return histogram.FloatHistogram{
CounterResetHint: histogram.GaugeType,
Schema: 0,
Schema: 1,
Count: 19,
Sum: 2.7,
ZeroThreshold: 1e-128,
@ -185,22 +190,29 @@ func testFloatHistogram() histogram.FloatHistogram {
{Offset: 0, Length: 1},
},
NegativeBuckets: []float64{1, 3, 1, 2, 1, 1},
CustomValues: []float64{21421, 523},
}
}
func TestFromIntToFloatOrIntHistogram(t *testing.T) {
t.Run("v1", func(t *testing.T) {
// v1 does not support nhcb.
testIntHistWithoutNHCB := testIntHistogram()
testIntHistWithoutNHCB.CustomValues = nil
testFloatHistWithoutNHCB := testFloatHistogram()
testFloatHistWithoutNHCB.CustomValues = nil
h := prompb.FromIntHistogram(123, &testIntHistWithoutNHCB)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHistWithoutNHCB, *h.ToIntHistogram())
require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
testIntHist := testIntHistogram()
testFloatHist := testFloatHistogram()
t.Run("v1", func(t *testing.T) {
h := prompb.FromIntHistogram(123, testIntHist.Copy())
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHist, *h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
h := writev2.FromIntHistogram(123, testIntHist.Copy())
h := writev2.FromIntHistogram(123, &testIntHist)
require.False(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Equal(t, testIntHist, *h.ToIntHistogram())
@ -209,17 +221,21 @@ func TestFromIntToFloatOrIntHistogram(t *testing.T) {
}
func TestFromFloatToFloatHistogram(t *testing.T) {
testFloatHist := testFloatHistogram()
t.Run("v1", func(t *testing.T) {
h := prompb.FromFloatHistogram(123, testFloatHist.Copy())
// v1 does not support nhcb.
testFloatHistWithoutNHCB := testFloatHistogram()
testFloatHistWithoutNHCB.CustomValues = nil
h := prompb.FromFloatHistogram(123, &testFloatHistWithoutNHCB)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())
require.Equal(t, testFloatHist, *h.ToFloatHistogram())
require.Equal(t, testFloatHistWithoutNHCB, *h.ToFloatHistogram())
})
t.Run("v2", func(t *testing.T) {
h := writev2.FromFloatHistogram(123, testFloatHist.Copy())
testFloatHist := testFloatHistogram()
h := writev2.FromFloatHistogram(123, &testFloatHist)
require.True(t, h.IsFloatHistogram())
require.Equal(t, int64(123), h.Timestamp)
require.Nil(t, h.ToIntHistogram())


@ -3450,6 +3450,12 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) {
// required for correctness.
func detectHistogramStatsDecoding(expr parser.Expr) {
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
if n, ok := node.(*parser.BinaryExpr); ok {
detectHistogramStatsDecoding(n.LHS)
detectHistogramStatsDecoding(n.RHS)
return fmt.Errorf("stop")
}
n, ok := (node).(*parser.VectorSelector)
if !ok {
return nil


@ -238,11 +238,11 @@ func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*lab
return errSeriesSet{err: q.err}
}
func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (*errQuerier) Close() error { return nil }


@ -43,7 +43,6 @@ import (
int int64
uint uint64
float float64
duration time.Duration
}
@ -176,8 +175,7 @@ START_METRIC_SELECTOR
%type <int> int
%type <uint> uint
%type <float> number series_value signed_number signed_or_unsigned_number
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
%type <duration> duration maybe_duration
%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
%start start
@ -218,7 +216,7 @@ expr :
| binary_expr
| function_call
| matrix_selector
| number_literal
| number_duration_literal
| offset_expr
| paren_expr
| string_literal
@ -415,18 +413,22 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN
* Offset modifiers.
*/
offset_expr: expr OFFSET duration
offset_expr: expr OFFSET number_duration_literal
{
yylex.(*parser).addOffset($1, $3)
numLit, _ := $3.(*NumberLiteral)
dur := time.Duration(numLit.Val * 1000) * time.Millisecond
yylex.(*parser).addOffset($1, dur)
$$ = $1
}
| expr OFFSET SUB duration
| expr OFFSET SUB number_duration_literal
{
yylex.(*parser).addOffset($1, -$4)
numLit, _ := $4.(*NumberLiteral)
dur := time.Duration(numLit.Val * 1000) * time.Millisecond
yylex.(*parser).addOffset($1, -dur)
$$ = $1
}
| expr OFFSET error
{ yylex.(*parser).unexpected("offset", "duration"); $$ = $1 }
{ yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
;
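The offset rules above, and the range and subquery rules below, share the same conversion idiom: the literal carries its value in seconds, and multiplying into milliseconds before truncating to `time.Duration` preserves sub-second precision. A standalone sketch of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// val is a literal's value in seconds, e.g. parsed from "4.018" or "4s18ms".
	val := 4.018
	// Multiply into milliseconds first so the fractional seconds survive
	// the truncating conversion to time.Duration.
	dur := time.Duration(val*1000) * time.Millisecond
	fmt.Println(dur) // prints "4.018s"
}
```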
/*
* @ modifiers.
@ -452,7 +454,7 @@ at_modifier_preprocessors: START | END;
* Subquery and range selectors.
*/
matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
{
var errMsg string
vs, ok := $1.(*VectorSelector)
@ -469,32 +471,44 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
yylex.(*parser).addParseErrf(errRange, errMsg)
}
numLit, _ := $3.(*NumberLiteral)
$$ = &MatrixSelector{
VectorSelector: $1.(Expr),
Range: $3,
Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
EndPos: yylex.(*parser).lastClosing,
}
}
;
subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
{
numLitRange, _ := $3.(*NumberLiteral)
numLitStep, _ := $5.(*NumberLiteral)
$$ = &SubqueryExpr{
Expr: $1.(Expr),
Range: $3,
Step: $5,
Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond,
EndPos: $6.Pos + 1,
}
}
| expr LEFT_BRACKET duration COLON duration error
| expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
{
numLitRange, _ := $3.(*NumberLiteral)
$$ = &SubqueryExpr{
Expr: $1.(Expr),
Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
Step: 0,
EndPos: $5.Pos + 1,
}
}
| expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
{ yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
| expr LEFT_BRACKET duration COLON error
{ yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 }
| expr LEFT_BRACKET duration error
| expr LEFT_BRACKET number_duration_literal COLON error
{ yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
| expr LEFT_BRACKET number_duration_literal error
{ yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
| expr LEFT_BRACKET error
{ yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 }
{ yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
;
/*
@ -866,16 +880,43 @@ match_op : EQL | NEQ | EQL_REGEX | NEQ_REGEX ;
* Literals.
*/
number_literal : NUMBER
number_duration_literal : NUMBER
{
$$ = &NumberLiteral{
Val: yylex.(*parser).number($1.Val),
PosRange: $1.PositionRange(),
}
}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = &NumberLiteral{
Val: dur.Seconds(),
PosRange: $1.PositionRange(),
}
}
;
number : NUMBER { $$ = yylex.(*parser).number($1.Val) } ;
number : NUMBER
{
$$ = yylex.(*parser).number($1.Val)
}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = dur.Seconds()
}
;
signed_number : ADD number { $$ = $2 }
| SUB number { $$ = -$2 }
@ -897,17 +938,6 @@ int : SUB uint { $$ = -int64($2) }
| uint { $$ = int64($1) }
;
duration : DURATION
{
var err error
$$, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
}
;
string_literal : STRING
{
$$ = &StringLiteral{
@ -931,11 +961,6 @@ string_identifier : STRING
* Wrappers for optional arguments.
*/
maybe_duration : /* empty */
{$$ = 0}
| duration
;
maybe_grouping_labels: /* empty */ { $$ = nil }
| grouping_labels
;

File diff suppressed because it is too large.


@ -478,7 +478,7 @@ func lexStatements(l *Lexer) stateFn {
skipSpaces(l)
}
l.bracketOpen = true
return lexDuration
return lexNumberOrDuration
case r == ']':
if !l.bracketOpen {
return l.errorf("unexpected right bracket %q", r)
@ -846,18 +846,6 @@ func lexLineComment(l *Lexer) stateFn {
return lexStatements
}
func lexDuration(l *Lexer) stateFn {
if l.scanNumber() {
return l.errorf("missing unit character in duration")
}
if !acceptRemainingDuration(l) {
return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
}
l.backup()
l.emit(DURATION)
return lexStatements
}
// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *Lexer) stateFn {
if !l.scanNumber() {
@ -909,6 +897,7 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
initialPos := l.pos
// Modify the digit pattern if the number is hexadecimal.
digitPattern := "0123456789"
// Disallow hexadecimal in series descriptions as the syntax is ambiguous.
@ -980,7 +969,10 @@ func (l *Lexer) scanNumber() bool {
// Handle digits at the end since we already consumed before this loop.
l.acceptRun(digitPattern)
}
// Empty string is not a valid number.
if l.pos == initialPos {
return false
}
// Next thing must not be alphanumeric unless it's the times token
// for series repetitions.
if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {


@ -2133,6 +2133,115 @@ var testExpr = []struct {
EndPos: 25,
},
},
{
input: `test{a="b"}[5m] OFFSET 3600`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
OriginalOffset: 1 * time.Hour,
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, "a", "b"),
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 27,
},
},
{
input: `foo[3ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 3 * time.Millisecond,
EndPos: 16,
},
},
{
input: `foo[4s180ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 20,
},
},
{
input: `foo[4.18] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 17,
},
},
{
input: `foo[4s18ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 19,
},
},
{
input: `foo[4.018] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 18,
},
},
{
input: `test{a="b"}[5y] @ 1603774699`,
expected: &MatrixSelector{
@ -2152,15 +2261,50 @@ var testExpr = []struct {
EndPos: 28,
},
},
{
input: "test[5]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 5 * time.Second,
EndPos: 7,
},
},
{
input: `some_metric[5m] @ 1m`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "some_metric",
Timestamp: makeInt64Pointer(60000),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 20,
},
},
{
input: `foo[5mm]`,
fail: true,
errMsg: "bad duration syntax: \"5mm\"",
errMsg: "bad number or duration syntax: \"5mm\"",
},
{
input: `foo[5m1]`,
fail: true,
errMsg: "bad duration syntax: \"5m1\"",
errMsg: "bad number or duration syntax: \"5m1\"",
},
{
input: `foo[5m:1m1]`,
@ -2194,17 +2338,12 @@ var testExpr = []struct {
{
input: `foo[]`,
fail: true,
errMsg: "missing unit character in duration",
errMsg: "bad number or duration syntax: \"\"",
},
{
input: `foo[1]`,
input: `foo[-1]`,
fail: true,
errMsg: "missing unit character in duration",
},
{
input: `some_metric[5m] OFFSET 1`,
fail: true,
errMsg: "unexpected number \"1\" in offset, expected duration",
errMsg: "bad number or duration syntax: \"\"",
},
{
input: `some_metric[5m] OFFSET 1mm`,
@ -2214,18 +2353,13 @@ var testExpr = []struct {
{
input: `some_metric[5m] OFFSET`,
fail: true,
errMsg: "unexpected end of input in offset, expected duration",
errMsg: "unexpected end of input in offset, expected number or duration",
},
{
input: `some_metric OFFSET 1m[5m]`,
fail: true,
errMsg: "1:22: parse error: no offset modifiers allowed before range",
},
{
input: `some_metric[5m] @ 1m`,
fail: true,
errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp",
},
{
input: `some_metric[5m] @`,
fail: true,
@ -2910,6 +3044,11 @@ var testExpr = []struct {
errMsg: "illegal character U+002E '.' in escape sequence",
},
// Subquery.
{
input: `foo{bar="baz"}[`,
fail: true,
errMsg: `1:16: parse error: bad number or duration syntax: ""`,
},
{
input: `foo{bar="baz"}[10m:6s]`,
expected: &SubqueryExpr{


@ -10,22 +10,54 @@ eval instant at 10s metric @ 100
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 100s
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 1m40s
metric{job="1"} 10
metric{job="2"} 20
eval instant at 10s metric @ 100 offset 50s
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 100 offset 50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset 50s @ 100
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset 50 @ 100
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50s
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset -50s @ 0
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric offset -50 @ 0
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50s
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50
metric{job="1"} 5
metric{job="2"} 10
eval instant at 10s -metric @ 100
{job="1"} -10
{job="2"} -20
@ -48,6 +80,12 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100 offset 50s)
eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100)
{job="1"} 15
eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100 offset 50)
{job="1"} 15
eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100)
{job="1"} 15
# Different timestamps.
eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100
{job="1"} 15
@ -58,6 +96,9 @@ eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metri
eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "")
{job="1"} 165
eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100) + label_replace(sum_over_time(metric{job="2"}[100] @ 100), "job", "1", "", "")
{job="1"} 165
# Subqueries.
# 10*(1+2+...+9) + 10.
@ -72,6 +113,10 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] @ 100 offset 20s)
eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
{job="1"} 288
# 10*(1+2+...+7) + 8.
eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)
{job="1"} 288
# Subquery with different timestamps.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.


@ -10,6 +10,11 @@ eval instant at 50m resets(http_requests[5m])
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[300])
{path="/foo"} 0
{path="/bar"} 0
{path="/biz"} 0
eval instant at 50m resets(http_requests[20m])
{path="/foo"} 1
{path="/bar"} 0
@ -239,10 +244,16 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
{} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
{} 76.81818181818181
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 76.81818181818181
eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h)
{} 76.81818181818181
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600)
{} 51.36363636363637


@ -715,6 +715,9 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
{} 1
eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
{} 100
clear
# Counter reset only noticeable in a single bucket.


@ -32,6 +32,9 @@ eval instant at 20s count_over_time(metric[1s])
eval instant at 20s count_over_time(metric[10s])
{} 1
eval instant at 20s count_over_time(metric[10])
{} 1
clear


@ -76,6 +76,21 @@ eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s)
eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time(metric1[30:10] offset 3)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)
{} 297
# Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.4


@ -151,7 +151,42 @@ func (g *Group) Name() string { return g.name }
func (g *Group) File() string { return g.file }
// Rules returns the group's rules.
func (g *Group) Rules() []Rule { return g.rules }
func (g *Group) Rules(matcherSets ...[]*labels.Matcher) []Rule {
if len(matcherSets) == 0 {
return g.rules
}
var rules []Rule
for _, rule := range g.rules {
if matchesMatcherSets(matcherSets, rule.Labels()) {
rules = append(rules, rule)
}
}
return rules
}
func matches(lbls labels.Labels, matchers ...*labels.Matcher) bool {
for _, m := range matchers {
if v := lbls.Get(m.Name); !m.Matches(v) {
return false
}
}
return true
}
// matchesMatcherSets ensures all matchers within a set are ANDed and the sets themselves are ORed.
func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) bool {
if len(matcherSets) == 0 {
return true
}
var ok bool
for _, matchers := range matcherSets {
if matches(lbls, matchers...) {
ok = true
}
}
return ok
}
// Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }


@ -380,13 +380,13 @@ func (m *Manager) RuleGroups() []*Group {
}
// Rules returns the list of the manager's rules.
func (m *Manager) Rules() []Rule {
func (m *Manager) Rules(matcherSets ...[]*labels.Matcher) []Rule {
m.mtx.RLock()
defer m.mtx.RUnlock()
var rules []Rule
for _, g := range m.groups {
rules = append(rules, g.rules...)
rules = append(rules, g.Rules(matcherSets...)...)
}
return rules
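A hypothetical caller-side sketch of the new filtering (variable names are illustrative; `labels.MustNewMatcher` is used the same way in tests elsewhere in this commit). Matchers inside one set are ANDed, and the sets themselves are ORed:

```go
// Select rules labeled severity="critical" OR team="db".
sets := [][]*labels.Matcher{
	{labels.MustNewMatcher(labels.MatchEqual, "severity", "critical")},
	{labels.MustNewMatcher(labels.MatchEqual, "team", "db")},
}
criticalOrDB := manager.Rules(sets...)
```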


@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:


@ -238,11 +238,11 @@ func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels
return storage.ErrSeriesSet(errSelect)
}
func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label values error")
}
func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label names error")
}


@ -122,11 +122,11 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
}
func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
@ -161,12 +161,12 @@ type LabelQuerier interface {
// It is not safe to use the strings beyond the lifetime of the querier.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// LabelNames returns all the unique label names present in the block in sorted order.
// If matchers are specified the returned result set is reduced
// to label names of metrics matching the matchers.
LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// Close releases the resources of the Querier.
Close() error
@ -190,6 +190,9 @@ type SelectHints struct {
Start int64 // Start time in milliseconds for this select.
End int64 // End time in milliseconds for this select.
// Maximum number of results returned. Use a value of 0 to disable.
Limit int
Step int64 // Query step size in milliseconds.
Func string // String representation of surrounding function or aggregation.
@ -217,6 +220,13 @@ type SelectHints struct {
DisableTrimming bool
}
// LabelHints specifies hints passed for label reads.
// This is used only as an option for implementation to use.
type LabelHints struct {
// Maximum number of results returned. Use a value of 0 to disable.
Limit int
}
// TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc.
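A minimal caller-side sketch of the new hints parameter introduced above (the label name and limit are illustrative; `q` is any `LabelQuerier`):

```go
// Ask for at most 100 values of the "job" label; passing a nil
// *LabelHints keeps the previous unbounded behavior.
vals, warns, err := q.LabelValues(ctx, "job", &LabelHints{Limit: 100})
```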


@ -169,8 +169,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, ws, err := q.lvals(ctx, q.queriers, name, matchers...)
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...)
if err != nil {
return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
}
@ -178,22 +178,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matc
}
// lvals performs merge sort for LabelValues from multiple queriers.
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if lq.Len() == 0 {
return nil, nil, nil
}
if lq.Len() == 1 {
return lq.Get(0).LabelValues(ctx, n, matchers...)
return lq.Get(0).LabelValues(ctx, n, hints, matchers...)
}
a, b := lq.SplitByHalf()
var ws annotations.Annotations
s1, w, err := q.lvals(ctx, a, n, matchers...)
s1, w, err := q.lvals(ctx, a, n, hints, matchers...)
ws.Merge(w)
if err != nil {
return nil, ws, err
}
s2, ws, err := q.lvals(ctx, b, n, matchers...)
s2, ws, err := q.lvals(ctx, b, n, hints, matchers...)
ws.Merge(w)
if err != nil {
return nil, ws, err
@ -229,13 +229,13 @@ func mergeStrings(a, b []string) []string {
}
// LabelNames returns all the unique label names present in all queriers in sorted order.
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
var (
labelNamesMap = make(map[string]struct{})
warnings annotations.Annotations
)
for _, querier := range q.queriers {
names, wrn, err := querier.LabelNames(ctx, matchers...)
names, wrn, err := querier.LabelNames(ctx, hints, matchers...)
if wrn != nil {
// TODO(bwplotka): We could potentially wrap warnings.
warnings.Merge(wrn)


@ -1361,7 +1361,7 @@ func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
}
func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name,
@ -1371,7 +1371,7 @@ func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matcher
return m.resp, m.warnings, m.err
}
func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock()
m.labelNamesCalls++
m.mtx.Unlock()
@ -1558,7 +1558,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}
})
t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames(ctx)
res, w, err := q.LabelNames(ctx, nil)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
@ -1573,7 +1573,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}
})
t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues(ctx, "test")
res, w, err := q.LabelValues(ctx, "test", nil)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)
@ -1589,7 +1589,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
})
t.Run("LabelValuesWithMatchers", func(t *testing.T) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
res, w, err := q.LabelValues(ctx, "test2", matcher)
res, w, err := q.LabelValues(ctx, "test2", nil, matcher)
require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res)


@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche
return NoopSeriesSet()
}
func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M
return NoopChunkedSeriesSet()
}
func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}
func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil
}


@ -36,7 +36,8 @@ import (
"github.com/prometheus/prometheus/util/annotations"
)
var testHistogram = histogram.Histogram{
var (
testHistogram = histogram.Histogram{
Schema: 2,
ZeroThreshold: 1e-128,
ZeroCount: 0,
@ -48,7 +49,7 @@ var testHistogram = histogram.Histogram{
NegativeBuckets: []int64{-1},
}
var writeRequestFixture = &prompb.WriteRequest{
writeRequestFixture = &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{
{
Labels: []prompb.Label{
@ -58,9 +59,9 @@ var writeRequestFixture = &prompb.WriteRequest{
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
Samples: []prompb.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
Labels: []prompb.Label{
@ -70,14 +71,13 @@ var writeRequestFixture = &prompb.WriteRequest{
{Name: "d", Value: "e"},
{Name: "foo", Value: "bar"},
},
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))},
Samples: []prompb.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
}
var (
writeV2RequestSeries1Metadata = metadata.Metadata{
Type: model.MetricTypeGauge,
Help: "Test gauge for test purposes",
@ -88,42 +88,77 @@ var (
Help: "Test counter for test purposes",
}
// writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation.
writeV2RequestFixture = func() *writev2.Request {
// writeV2RequestFixture represents the same request as writeRequestFixture,
// but using the v2 representation, plus includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata.
// NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
writeV2RequestFixture = &writev2.Request{
Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
Timeseries: []writev2.TimeSeries{
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type.
HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type.
HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help.
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
}
)
func TestWriteV2RequestFixture(t *testing.T) {
// Generate dynamically writeV2RequestFixture, reusing v1 fixture elements.
st := writev2.NewSymbolTable()
b := labels.NewScratchBuilder(0)
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
return &writev2.Request{
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
expected := &writev2.Request{
Timeseries: []writev2.TimeSeries{
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2.
Type: writev2.Metadata_METRIC_TYPE_GAUGE,
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 0}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: labelRefs,
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2.
Type: writev2.Metadata_METRIC_TYPE_COUNTER,
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
Symbols: st.Symbols(),
}
// Check if it matches static writeV2RequestFixture.
require.Equal(t, expected, writeV2RequestFixture)
}
func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct {

View file

@@ -39,9 +39,3 @@ func (m *maxTimestamp) Get() float64 {
defer m.mtx.Unlock()
return m.value
}
func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
if m.Get() > 0 {
m.Gauge.Collect(c)
}
}

View file

@@ -232,7 +232,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace,
Subsystem: subsystem,
Name: "queue_highest_sent_timestamp_seconds",
Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.",
Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
ConstLabels: constLabels,
}),
}
@@ -1468,6 +1468,8 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) {
for q.tryEnqueueingBatch(done) {
time.Sleep(time.Second)
}
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
q.batch = nil
close(q.batchQueue)
}

View file

@@ -60,7 +60,7 @@ func newHighestTimestampMetric() *maxTimestamp {
Namespace: namespace,
Subsystem: subsystem,
Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet",
}),
}
}

View file

@@ -210,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
}
// LabelValues implements storage.Querier and is a noop.
func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}
// LabelNames implements storage.Querier and is a noop.
func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented")
}

View file

@@ -100,7 +100,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
Namespace: namespace,
Subsystem: subsystem,
Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
}),
},
}

View file

@@ -19,6 +19,7 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
@@ -27,6 +28,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar"
@@ -44,6 +46,7 @@ type writeHandler struct {
appendable storage.Appendable
samplesWithInvalidLabelsTotal prometheus.Counter
samplesAppendedWithoutMetadata prometheus.Counter
acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
}
@@ -52,6 +55,9 @@ const maxAheadTime = 10 * time.Minute
// NewWriteHandler creates a http.Handler that accepts remote write requests with
// the given message in acceptedProtoMsgs and writes them to the provided appendable.
//
// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
for _, acc := range acceptedProtoMsgs {
@@ -61,15 +67,18 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
logger: logger,
appendable: appendable,
acceptedProtoMsgs: protoMsgs,
samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "remote_write_invalid_labels_samples_total",
Help: "The total number of remote write samples which contains invalid labels.",
Help: "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.",
}),
samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "remote_write_without_metadata_appended_samples_total",
Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
}),
}
return h
}
@@ -108,15 +117,15 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
contentType = appProtoContentType
}
msgType, err := h.parseProtoMsg(contentType)
if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
return
}
if _, ok := h.acceptedProtoMsgs[msgType]; !ok {
err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) {
for k := range h.acceptedProtoMsgs {
ret = append(ret, string(k))
}
@@ -154,100 +163,111 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// Now that we have a decompressed buffer, we can unmarshal it.
if msgType == config.RemoteWriteProtoMsgV1 {
// PRW 1.0 flow has different proto message and no partial write handling.
var req prompb.WriteRequest
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error())
level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = h.write(r.Context(), &req)
case config.RemoteWriteProtoMsgV2:
var req writev2.Request
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = h.writeV2(r.Context(), &req)
}
if err = h.write(r.Context(), &req); err != nil {
switch {
case err == nil:
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
// Indicate that an out-of-order sample is a bad request, to prevent retries.
http.Error(w, err.Error(), http.StatusBadRequest)
return
default:
level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error())
level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
w.WriteHeader(http.StatusNoContent)
return
}
// Remote Write 2.x proto message handling.
var req writev2.Request
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)
// Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
respStats.SetResponseHeaders(w.Header())
if err != nil {
if errHTTPCode/5 == 100 { // 5xx
level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error())
}
http.Error(w, err.Error(), errHTTPCode)
return
}
w.WriteHeader(http.StatusNoContent)
}
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
outOfOrderExemplarErrs := 0
samplesWithInvalidLabels := 0
samplesAppended := 0
timeLimitApp := &timeLimitAppender{
app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
defer func() {
if err != nil {
_ = app.Rollback()
return
}
err = app.Commit()
if err != nil {
h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended))
}
}()
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, nil)
if !ls.Has(labels.MetricName) || !ls.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
samplesWithInvalidLabels++
// TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are
// potentially written. Perhaps unify with fixed writeV2 implementation a bit.
continue
}
if err := h.appendV1Samples(app, ts.Samples, ls); err != nil {
return err
}
samplesAppended += len(ts.Samples)
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil)
h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs)
if _, err := app.AppendExemplar(0, ls, e); err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderExemplar):
outOfOrderExemplarErrs++
level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
default:
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
}
}
}
if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil {
return err
}
samplesAppended += len(ts.Histograms)
}
if outOfOrderExemplarErrs > 0 {
@@ -256,151 +276,216 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
if samplesWithInvalidLabels > 0 {
h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
}
return nil
}
func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
timeLimitApp := &timeLimitAppender{
func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}
const (
prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples"
rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms"
rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars"
)
type responseStats struct {
samples int
histograms int
exemplars int
}
func (s responseStats) SetResponseHeaders(h http.Header) {
h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples))
h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms))
h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars))
}
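// A small sketch (editor's illustration, not part of this change) of how a
// remote-write 2.0 sender could consume the headers set above; assumes only
// the standard "net/http" and "strconv" packages.
func writtenStats(resp *http.Response) (samples, histograms, exemplars int) {
samples, _ = strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
histograms, _ = strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
exemplars, _ = strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
return samples, histograms, exemplars
}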
// writeV2 is similar to write, but it works with v2 proto message,
// allows partial 4xx writes and gathers statistics.
//
// writeV2 returns the statistics.
// In error cases, writeV2 also returns the statistics, plus the error that
// should be propagated to the remote write sender and the httpCode to use for the status.
//
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we have 5xx type of error, we immediately stop and rollback all appends.
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) {
app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
}
rs := responseStats{}
samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs)
if err != nil {
if errHTTPCode/5 == 100 {
// On 5xx, we always rollback, because we expect
// the sender to retry and TSDB is not idempotent.
if rerr := app.Rollback(); rerr != nil {
level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr)
}
return responseStats{}, errHTTPCode, err
}
// Non-retriable (e.g. bad request error case). Can be partially written.
commitErr := app.Commit()
if commitErr != nil {
// Bad requests do not matter here, as we have an internal (retryable) error.
return responseStats{}, http.StatusInternalServerError, commitErr
}
// A bad request error happened, but the rest of the data (if any) was written.
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return rs, errHTTPCode, err
}
// All good, just commit.
if err := app.Commit(); err != nil {
return responseStats{}, http.StatusInternalServerError, err
}
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return rs, 0, nil
}
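// A sketch of the resulting client-side contract (assumed sender logic, not
// part of this change): a 5xx means nothing was committed and the whole
// request may be retried; a 4xx means the request must not be retried, even
// though its valid series may already have been partially written.
func shouldRetry(statusCode int) bool {
return statusCode >= 500 && statusCode < 600
}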
func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
var (
badRequestErrs []error
outOfOrderExemplarErrs, samplesWithInvalidLabels int
b = labels.NewScratchBuilder(0)
)
for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, req.Symbols)
// Validate series labels early.
// NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose
// specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
if !ls.Has(labels.MetricName) || !ls.IsValid() {
badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String()))
samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
continue
}
allSamplesSoFar := rs.samples + rs.histograms
var ref storage.SeriesRef
// Samples.
for _, s := range ts.Samples {
ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
if err == nil {
rs.samples++
continue
}
// Handle append error.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
errors.Is(err, storage.ErrTooOldSample) {
// TODO(bwplotka): Not too spammy log?
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err
}
// Native Histograms.
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err == nil {
rs.histograms++
continue
}
// Handle append error.
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
// TODO(bwplotka): Not too spammy log?
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err
}
// Exemplars.
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, req.Symbols)
ref, err = app.AppendExemplar(ref, ls, e)
if err == nil {
rs.exemplars++
continue
}
// Handle append error.
// TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms.
// Since exemplar storage is still experimental, we don't fail the request in any way on ingestion errors.
if errors.Is(err, storage.ErrOutOfOrderExemplar) {
outOfOrderExemplarErrs++
level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
continue
}
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
}
m := ts.ToMetadata(req.Symbols)
if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
// Metadata is attached to each series, and since Prometheus does not reject samples without metadata information,
// we don't report a remote write error either. We increment a metric instead.
samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar
}
}
if outOfOrderExemplarErrs > 0 {
level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
}
h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
if len(badRequestErrs) == 0 {
return samplesWithoutMetadata, 0, nil
}
// TODO(bwplotka): Better concat formatting? Perhaps add size limit?
return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
}
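// A tiny illustration (hypothetical errors, not part of this change) of the
// 400 response body produced by errors.Join above: the joined error renders
// one sub-error per line.
func exampleJoinedBody() string {
err := errors.Join(
errors.New(`out of order sample for series {__name__="a"}`),
errors.New(`duplicate sample for timestamp for series {__name__="b"}`),
)
return err.Error() // Two lines, one per rejected sample.
}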
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and

View file

@@ -16,6 +16,7 @@ package remote
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
@@ -27,6 +28,7 @@ import (
"time"
"github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
@@ -290,8 +292,134 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
}
}
func expectHeaderValue(t testing.TB, expected int, got string) {
t.Helper()
require.NotEmpty(t, got)
i, err := strconv.Atoi(got)
require.NoError(t, err)
require.Equal(t, expected, i)
}
func TestRemoteWriteHandler_V2Message(t *testing.T) {
// V2 supports partial writes for non-retriable errors, so test them.
for _, tc := range []struct {
desc string
input []writev2.TimeSeries
expectedCode int
expectedRespBody string
commitErr error
appendSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
}{
{
desc: "All timeseries accepted",
input: writeV2RequestFixture.Timeseries,
expectedCode: http.StatusNoContent,
},
{
desc: "Partial write; first series with invalid labels (no metric name)",
input: append(
// Series with test_metric1="test_metric1" labels.
[]writev2.TimeSeries{{LabelsRefs: []uint32{2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
writeV2RequestFixture.Timeseries...),
expectedCode: http.StatusBadRequest,
expectedRespBody: "invalid metric name or labels, got {test_metric1=\"test_metric1\"}\n",
},
{
desc: "Partial write; first series with invalid labels (empty metric name)",
input: append(
// Series with __name__="" labels.
[]writev2.TimeSeries{{LabelsRefs: []uint32{1, 0}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
writeV2RequestFixture.Timeseries...),
expectedCode: http.StatusBadRequest,
expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n",
},
{
desc: "Partial write; first series with one OOO sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, writev2.Sample{Value: 2, Timestamp: 0})
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one dup sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, f.Timeseries[0].Samples[0])
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one OOO histogram sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil)))
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one dup histogram sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[1])
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
// Errors from various parts of the write path.
{
desc: "Internal sample append error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
appendSampleErr: errors.New("some sample internal append error"),
expectedCode: http.StatusInternalServerError,
expectedRespBody: "some sample internal append error\n",
},
{
desc: "Internal histogram sample append error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
appendHistogramErr: errors.New("some histogram sample internal append error"),
expectedCode: http.StatusInternalServerError,
expectedRespBody: "some histogram sample internal append error\n",
},
{
desc: "Partial write; skipped exemplar; exemplar storage errs are noop",
input: writeV2RequestFixture.Timeseries,
appendExemplarErr: errors.New("some exemplar append error"),
expectedCode: http.StatusNoContent,
},
{
desc: "Partial write; skipped metadata; metadata storage errs are noop",
input: writeV2RequestFixture.Timeseries,
updateMetadataErr: errors.New("some metadata update error"),
expectedCode: http.StatusNoContent,
},
{
desc: "Internal commit error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
commitErr: errors.New("storage error"),
expectedCode: http.StatusInternalServerError,
expectedRespBody: "storage error\n",
},
} {
t.Run(tc.desc, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
@@ -301,40 +429,59 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{}
appendable := &mockAppendable{
commitErr: tc.commitErr,
appendSampleErr: tc.appendSampleErr,
appendHistogramErr: tc.appendHistogramErr,
appendExemplarErr: tc.appendExemplarErr,
updateMetadataErr: tc.updateMetadataErr,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, tc.expectedCode, resp.StatusCode)
respBody, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, tc.expectedRespBody, string(respBody))
if tc.expectedCode == http.StatusInternalServerError {
// We don't expect writes for partial writes with retry-able code.
expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
require.Empty(t, len(appendable.samples))
require.Empty(t, len(appendable.histograms))
require.Empty(t, len(appendable.exemplars))
require.Empty(t, len(appendable.metadata))
return
}
// Double check mandatory 2.0 stats.
// writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each.
expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
expectHeaderValue(t, 4, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
if tc.appendExemplarErr != nil {
expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
} else {
expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
}
// Double check what was actually appended.
var (
b = labels.NewScratchBuilder(0)
i, j, k, m int
)
for _, ts := range writeV2RequestFixture.Timeseries {
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
for _, s := range ts.Samples {
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := hp.ToFloatHistogram()
@@ -345,9 +492,24 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
}
k++
}
if tc.appendExemplarErr == nil {
for _, e := range ts.Exemplars {
exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
}
if tc.updateMetadataErr == nil {
expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols)
requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m])
m++
}
}
})
}
}
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderSample_V1Message(t *testing.T) {
for _, tc := range []struct {
Name string
@@ -372,7 +534,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
appendable := &mockAppendable{latestSample: 100}
appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -384,49 +546,10 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
}
}
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderExemplar_V1Message(t *testing.T) {
tests := []struct {
Name string
@@ -453,7 +576,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
appendable := &mockAppendable{latestExemplar: 100}
appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -466,49 +589,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
}
}
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderHistogram_V1Message(t *testing.T) {
for _, tc := range []struct {
Name string
@@ -533,7 +614,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
appendable := &mockAppendable{latestHistogram: 100}
appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
recorder := httptest.NewRecorder()
@@ -545,46 +626,6 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
}
}
func BenchmarkRemoteWriteHandler(b *testing.B) {
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
var reqs []*http.Request
@@ -719,15 +760,20 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
}
type mockAppendable struct {
latestSample map[uint64]int64
samples []mockSample
latestExemplar map[uint64]int64
exemplars []mockExemplar
latestHistogram map[uint64]int64
histograms []mockHistogram
metadata []mockMetadata
// optional errors to inject.
commitErr error
appendSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
}
type mockSample struct {
@@ -765,48 +811,92 @@ func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...inte
}
func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
if m.latestSample == nil {
m.latestSample = map[uint64]int64{}
}
if m.latestHistogram == nil {
m.latestHistogram = map[uint64]int64{}
}
if m.latestExemplar == nil {
m.latestExemplar = map[uint64]int64{}
}
return m
}
func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
if t < m.latestSample {
return 0, storage.ErrOutOfOrderSample
if m.appendSampleErr != nil {
return 0, m.appendSampleErr
}
m.latestSample = t
latestTs := m.latestSample[l.Hash()]
if t < latestTs {
return 0, storage.ErrOutOfOrderSample
}
if t == latestTs {
return 0, storage.ErrDuplicateSampleForTimestamp
}
m.latestSample[l.Hash()] = t
m.samples = append(m.samples, mockSample{l, t, v})
return 0, nil
}
func (m *mockAppendable) Commit() error {
if m.commitErr != nil {
_ = m.Rollback() // As per Commit method contract.
}
return m.commitErr
}
func (m *mockAppendable) Rollback() error {
m.samples = m.samples[:0]
m.exemplars = m.exemplars[:0]
m.histograms = m.histograms[:0]
m.metadata = m.metadata[:0]
return nil
}
func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
if e.Ts < m.latestExemplar {
return 0, storage.ErrOutOfOrderExemplar
if m.appendExemplarErr != nil {
return 0, m.appendExemplarErr
}
m.latestExemplar = e.Ts
latestTs := m.latestExemplar[l.Hash()]
if e.Ts < latestTs {
return 0, storage.ErrOutOfOrderExemplar
}
if e.Ts == latestTs {
return 0, storage.ErrDuplicateExemplar
}
m.latestExemplar[l.Hash()] = e.Ts
m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
return 0, nil
}
func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if t < m.latestHistogram {
return 0, storage.ErrOutOfOrderSample
if m.appendHistogramErr != nil {
return 0, m.appendHistogramErr
}
m.latestHistogram = t
latestTs := m.latestHistogram[l.Hash()]
if t < latestTs {
return 0, storage.ErrOutOfOrderSample
}
if t == latestTs {
return 0, storage.ErrDuplicateSampleForTimestamp
}
m.latestHistogram[l.Hash()] = t
m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
return 0, nil
}
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
if m.updateMetadataErr != nil {
return 0, m.updateMetadataErr
}
m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
return 0, nil
}

View file

@@ -369,7 +369,7 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
}
func TestOTLPWriteHandler(t *testing.T) {
exportRequest := generateOTLPWriteRequest()
buf, err := exportRequest.MarshalProto()
require.NoError(t, err)
@@ -392,7 +392,7 @@ func TestOTLPWriteHandler(t *testing.T) {
require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
}
func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
d := pmetric.NewMetrics()
// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
@@ -422,6 +422,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
counterDataPoint.Attributes().PutStr("foo.bar", "baz")
counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
counterExemplar.SetDoubleValue(10.0)
counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})

View file

@@ -49,16 +49,16 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
}
func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...)
if err != nil {
return nil, w.Add(err), nil
}
return vals, w, nil
}
func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...)
if err != nil {
return nil, w.Add(err), nil
}

View file

@@ -60,7 +60,7 @@ type XORChunk struct {
b bstream
}
// NewXORChunk returns a new chunk with XOR encoding.
func NewXORChunk() *XORChunk {
b := make([]byte, 2, 128)
return &XORChunk{b: bstream{stream: b, count: 0}}

View file

@@ -1001,7 +1001,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
q, err := db.Querier(0, 1)
require.NoError(t, err)
values, ws, err := q.LabelValues(ctx, "labelname")
values, ws, err := q.LabelValues(ctx, "labelname", nil)
require.NoError(t, err)
require.Empty(t, ws)
require.Equal(t, []string{"labelvalue"}, values)
@@ -1976,7 +1976,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
defer q.Close()
// The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
b, ws, err := q.LabelValues(ctx, "blockID")
b, ws, err := q.LabelValues(ctx, "blockID", nil)
require.NoError(t, err)
var nilAnnotations annotations.Annotations
require.Equal(t, nilAnnotations, ws)
@@ -2288,7 +2288,7 @@ func TestDB_LabelNames(t *testing.T) {
q, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
var ws annotations.Annotations
labelNames, ws, err = q.LabelNames(ctx, nil)
require.NoError(t, err)
require.Empty(t, ws)
require.NoError(t, q.Close())

View file

@@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
}, nil
}
func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.SortedLabelValues(ctx, name, matchers...)
return res, nil, err
}
func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, err := q.index.LabelNames(ctx, matchers...)
return res, nil, err
}

View file

@@ -3022,7 +3022,7 @@ func TestQuerierIndexQueriesRace(t *testing.T) {
q, err := db.Querier(math.MinInt64, math.MaxInt64)
require.NoError(t, err)
values, _, err := q.LabelValues(ctx, "seq", c.matchers...)
values, _, err := q.LabelValues(ctx, "seq", nil, c.matchers...)
require.NoError(t, err)
require.Emptyf(t, values, `label values for label "seq" should be empty`)

View file

@@ -543,7 +543,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
return histograms, nil
}
// DecodeFloatHistogram decodes a FloatHistogram from a byte slice.
func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
fh.CounterResetHint = histogram.CounterResetHint(buf.Byte())

View file

@@ -265,6 +265,11 @@ func (w *Watcher) loop() {
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
_, lastSegment, err := w.firstAndLast()
if err != nil {
return fmt.Errorf("wal.Segments: %w", err)
}
// We want to ensure this is false across iterations since
// Run will be called again if there was a failure to read the WAL.
w.sendSamples = false
@@ -289,20 +294,14 @@ func (w *Watcher) Run() error {
return err
}
level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment)
level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
for !isClosed(w.quit) {
w.currentSegmentMetric.Set(float64(currentSegment))
// On start, after reading the existing WAL for series records, we have a pointer to the latest segment.
// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
return err
}

View file

@@ -17,6 +17,7 @@ import (
"math/rand"
"os"
"path"
"runtime"
"sync"
"testing"
"time"
@@ -700,11 +701,46 @@ func TestRun_StartupTime(t *testing.T) {
}
}
func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error {
enc := record.Encoder{}
for j := 0; j < seriesCount; j++ {
ref := j + (segment * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)),
},
}, nil)
if err := w.Log(series); err != nil {
return err
}
for k := 0; k < samplesCount; k++ {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(segment),
V: float64(segment),
},
}, nil)
if err := w.Log(sample); err != nil {
return err
}
}
}
return nil
}
func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms.
t.SkipNow()
}
const segmentSize = pageSize // Smallest allowed segment size.
const segmentsToWrite = 5
const segmentsToRead = segmentsToWrite - 1
const seriesCount = 10
const samplesCount = 50
// This test can take longer than intended to finish in cloud CI.
readTimeout := 10 * time.Second
@@ -717,73 +753,37 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
err := os.Mkdir(wdir, 0o777)
require.NoError(t, err)
w, err := NewSize(nil, nil, wdir, segmentSize, compress)
require.NoError(t, err)
var wg sync.WaitGroup
// Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk.
require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount))
w.NextSegment() // Force creation of the next segment
wg.Add(1)
go func() {
defer wg.Done()
for i := 1; i < segments; i++ {
for j := 0; j < seriesCount; j++ {
ref := j + (i * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for k := 0; k < samplesCount; k++ {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
for i := 1; i < segmentsToWrite; i++ {
require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
w.NextSegment()
}
}()
wt := newWriteToMock(time.Millisecond)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segmentsToRead
watcher.setMetrics()
startTime := time.Now()
err = watcher.Run()
wg.Wait()
require.Less(t, time.Since(startTime), readTimeout)
// But samples records shouldn't get dropped
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() > 0
})
require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
require.NoError(t, err)
require.NoError(t, w.Close())
})

View file

@@ -660,6 +660,10 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
hints := &storage.LabelHints{
Limit: toHintLimit(limit),
}
q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil}
@@ -674,7 +678,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
labelNamesSet := make(map[string]struct{})
for _, matchers := range matcherSets {
vals, callWarnings, err := q.LabelNames(r.Context(), hints, matchers...)
if err != nil {
return apiFuncResult{nil, returnAPIError(err), warnings, nil}
}
@@ -696,7 +700,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
if len(matcherSets) == 1 {
matchers = matcherSets[0]
}
names, warnings, err = q.LabelNames(r.Context(), hints, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
}
@@ -706,7 +710,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
names = []string{}
}
if len(names) > limit {
if limit > 0 && len(names) > limit {
names = names[:limit]
warnings = warnings.Add(errors.New("results truncated due to limit"))
}
@@ -740,6 +744,10 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
hints := &storage.LabelHints{
Limit: toHintLimit(limit),
}
q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
@@ -764,7 +772,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
var callWarnings annotations.Annotations
labelValuesSet := make(map[string]struct{})
for _, matchers := range matcherSets {
vals, callWarnings, err = q.LabelValues(ctx, name, hints, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
}
@@ -783,7 +791,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
if len(matcherSets) == 1 {
matchers = matcherSets[0]
}
vals, warnings, err = q.LabelValues(ctx, name, hints, matchers...)
if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
}
@@ -795,7 +803,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
slices.Sort(vals)
if len(vals) > limit {
if limit > 0 && len(vals) > limit {
vals = vals[:limit]
warnings = warnings.Add(errors.New("results truncated due to limit"))
}
@@ -865,6 +873,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
Start: timestamp.FromTime(start),
End: timestamp.FromTime(end),
Func: "series", // There is no series function, this token is used for lookups that don't need samples.
Limit: toHintLimit(limit),
}
var set storage.SeriesSet
@@ -891,7 +900,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
}
metrics = append(metrics, set.At().Labels())
if len(metrics) > limit {
if limit > 0 && len(metrics) > limit {
metrics = metrics[:limit]
warnings.Add(errors.New("results truncated due to limit"))
return apiFuncResult{metrics, nil, warnings, closer}
@ -1397,6 +1406,11 @@ func (api *API) rules(r *http.Request) apiFuncResult {
rgSet := queryFormToSet(r.Form["rule_group[]"])
fSet := queryFormToSet(r.Form["file[]"])
matcherSets, err := parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
ruleGroups := api.rulesRetriever(r.Context()).RuleGroups()
res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))}
typ := strings.ToLower(r.URL.Query().Get("type"))
@ -1436,7 +1450,8 @@ func (api *API) rules(r *http.Request) apiFuncResult {
EvaluationTime: grp.GetEvaluationTime().Seconds(),
LastEvaluation: grp.GetLastEvaluation(),
}
for _, rr := range grp.Rules() {
for _, rr := range grp.Rules(matcherSets...) {
var enrichedRule Rule
if len(rnSet) > 0 {
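
The variadic matcher sets passed to grp.Rules give OR semantics across match[] parameters: a rule is kept when its labels satisfy every matcher in at least one set, as the test cases later in this diff exercise. A minimal sketch of that predicate, assuming only the labels package; matchesAny is a hypothetical helper, not the upstream implementation:

import "github.com/prometheus/prometheus/model/labels"

// matchesAny reports whether ruleLabels satisfies at least one matcher set.
// No sets at all means "no filter": everything matches.
func matchesAny(ruleLabels labels.Labels, matcherSets ...[]*labels.Matcher) bool {
	if len(matcherSets) == 0 {
		return true
	}
	for _, set := range matcherSets {
		allMatch := true
		for _, m := range set {
			if !m.Matches(ruleLabels.Get(m.Name)) {
				allMatch = false
				break
			}
		}
		if allMatch {
			return true
		}
	}
	return false
}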
@ -1902,8 +1917,8 @@ OUTER:
return matcherSets, nil
}
// parseLimitParam returning 0 means no limit is to be applied.
func parseLimitParam(limitStr string) (limit int, err error) {
limit = math.MaxInt
if limitStr == "" {
return limit, nil
}
@ -1912,9 +1927,19 @@ func parseLimitParam(limitStr string) (limit int, err error) {
if err != nil {
return limit, err
}
if limit <= 0 {
return limit, errors.New("limit must be positive")
if limit < 0 {
return limit, errors.New("limit must be non-negative")
}
return limit, nil
}
// toHintLimit increases the API limit, as returned by parseLimitParam, by 1.
// This allows for emitting warnings when the results are truncated.
func toHintLimit(limit int) int {
// 0 means no limit; this also avoids int overflow
if limit > 0 && limit < math.MaxInt {
return limit + 1
}
return limit
}
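
Putting the two helpers together (a worked sketch of the functions above; the input values are hypothetical):

limit, _ := parseLimitParam("5")  // 5
hint := toHintLimit(limit)        // 6: ask storage for one extra result so
                                  // len(results) > limit detects truncation
                                  // and a warning can be emitted
noLimit, _ := parseLimitParam("") // 0: no limit applied
_ = toHintLimit(noLimit)          // 0: "no limit" passes through unchanged
_, err := parseLimitParam("-1")   // error: "limit must be non-negative"
_ = hint; _ = err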

View file

@ -261,11 +261,36 @@ func (m *rulesRetrieverMock) CreateAlertingRules() {
false,
log.NewNopLogger(),
)
rule4 := rules.NewAlertingRule(
"test_metric6",
expr2,
time.Second,
0,
labels.FromStrings("testlabel", "rule"),
labels.Labels{},
labels.Labels{},
"",
true,
log.NewNopLogger(),
)
rule5 := rules.NewAlertingRule(
"test_metric7",
expr2,
time.Second,
0,
labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
labels.Labels{},
labels.Labels{},
"",
true,
log.NewNopLogger(),
)
var r []*rules.AlertingRule
r = append(r, rule1)
r = append(r, rule2)
r = append(r, rule3)
r = append(r, rule4)
r = append(r, rule5)
m.alertingRules = r
}
@ -300,7 +325,9 @@ func (m *rulesRetrieverMock) CreateRuleGroups() {
recordingExpr, err := parser.ParseExpr(`vector(1)`)
require.NoError(m.testing, err, "unable to parse alert expression")
recordingRule := rules.NewRecordingRule("recording-rule-1", recordingExpr, labels.Labels{})
recordingRule2 := rules.NewRecordingRule("recording-rule-2", recordingExpr, labels.FromStrings("testlabel", "rule"))
r = append(r, recordingRule)
r = append(r, recordingRule2)
group := rules.NewGroup(rules.GroupOptions{
Name: "grp",
@ -739,13 +766,16 @@ func TestLabelNames(t *testing.T) {
api := &API{
Queryable: storage,
}
request := func(method string, matchers ...string) (*http.Request, error) {
request := func(method, limit string, matchers ...string) (*http.Request, error) {
u, err := url.Parse("http://example.com")
require.NoError(t, err)
q := u.Query()
for _, matcher := range matchers {
q.Add("match[]", matcher)
}
if limit != "" {
q.Add("limit", limit)
}
u.RawQuery = q.Encode()
r, err := http.NewRequest(method, u.String(), nil)
@ -759,6 +789,7 @@ func TestLabelNames(t *testing.T) {
name string
api *API
matchers []string
limit string
expected []string
expectedErrorType errorType
}{
@ -773,6 +804,13 @@ func TestLabelNames(t *testing.T) {
expected: []string{"__name__", "abc", "foo", "xyz"},
api: api,
},
{
name: "non empty label matcher with limit",
matchers: []string{`{foo=~".+"}`},
expected: []string{"__name__", "abc"},
limit: "2",
api: api,
},
{
name: "exact label matcher",
matchers: []string{`{foo="boo"}`},
@ -805,7 +843,7 @@ func TestLabelNames(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
for _, method := range []string{http.MethodGet, http.MethodPost} {
ctx := context.Background()
req, err := request(method, tc.matchers...)
req, err := request(method, tc.limit, tc.matchers...)
require.NoError(t, err)
res := tc.api.labelNames(req.WithContext(ctx))
assertAPIError(t, res.err, tc.expectedErrorType)
@ -1430,6 +1468,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
responseLen: 2, // API does not specify which particular value will come back.
warningsCount: 0, // No warnings if limit isn't exceeded.
},
{
endpoint: api.series,
query: url.Values{
"match[]": []string{"test_metric1"},
"limit": []string{"0"},
},
responseLen: 2, // API does not specify which particular value will come back.
warningsCount: 0, // No warnings if limit isn't exceeded.
},
// Missing match[] query params in series requests.
{
endpoint: api.series,
@ -2151,6 +2198,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
RecordingRule{
Name: "recording-rule-1",
Query: "vector(1)",
@ -2158,6 +2227,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "recording",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
@ -2210,6 +2286,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: nil,
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: nil,
Health: "ok",
Type: "alerting",
},
RecordingRule{
Name: "recording-rule-1",
Query: "vector(1)",
@ -2217,6 +2315,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "recording",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
@ -2276,6 +2381,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
},
},
},
@ -2302,6 +2429,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok",
Type: "recording",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
@ -2369,6 +2503,179 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"match[]": []string{`{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"type": []string{"alert"},
"match[]": []string{`{templatedlabel="{{ $externalURL }}"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"match[]": []string{`{testlabel="abc"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{},
},
},
// This tests the OR condition: the API response should return a rule if it matches at least one of the label selectors
{
endpoint: api.rules,
query: url.Values{
"match[]": []string{`{testlabel="abc"}`, `{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"type": []string{"record"},
"match[]": []string{`{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"type": []string{"alert"},
"match[]": []string{`{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.queryExemplars,
query: url.Values{

View file

@ -171,11 +171,11 @@ type errorTestQuerier struct {
err error
}
func (t errorTestQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (t errorTestQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err
}
func (t errorTestQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
func (t errorTestQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err
}
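
For implementors of storage.Querier, the new hints argument is advisory: a sketch of honoring it, assuming the LabelHints shape used above with only the Limit field consulted (myQuerier and allLabelNames are hypothetical, not upstream code):

func (q myQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	names, warns, err := q.allLabelNames(ctx, matchers...) // hypothetical helper
	if err != nil {
		return nil, warns, err
	}
	if hints != nil && hints.Limit > 0 && len(names) > hints.Limit {
		names = names[:hints.Limit] // the hint is best-effort; callers re-check and warn on truncation
	}
	return names, warns, nil
}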

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.53.0",
"version": "0.53.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,11 +29,11 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.53.0",
"@prometheus-io/lezer-promql": "0.53.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
"@codemirror/autocomplete": "^6.16.2",
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0",
"@codemirror/state": "^6.3.3",

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.53.0",
"version": "0.53.1",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",

View file

@ -1,19 +1,19 @@
{
"name": "prometheus-io",
"version": "0.53.0",
"version": "0.53.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "prometheus-io",
"version": "0.53.0",
"version": "0.53.1",
"workspaces": [
"react-app",
"module/*"
],
"devDependencies": {
"@types/jest": "^29.5.12",
"@types/node": "^20.14.2",
"@types/node": "^20.14.9",
"eslint-config-prettier": "^9.1.0",
"eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^4.2.1",
@ -21,7 +21,7 @@
"jest-fetch-mock": "^3.0.3",
"prettier": "^2.8.8",
"react-scripts": "^5.0.1",
"ts-jest": "^29.1.4",
"ts-jest": "^29.2.2",
"typescript": "^4.9.5"
},
"engines": {
@ -30,14 +30,14 @@
},
"module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql",
"version": "0.53.0",
"version": "0.53.1",
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.53.0",
"@prometheus-io/lezer-promql": "0.53.1",
"lru-cache": "^7.18.3"
},
"devDependencies": {
"@codemirror/autocomplete": "^6.16.2",
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0",
"@codemirror/state": "^6.3.3",
@ -69,7 +69,7 @@
},
"module/lezer-promql": {
"name": "@prometheus-io/lezer-promql",
"version": "0.53.0",
"version": "0.53.1",
"license": "Apache-2.0",
"devDependencies": {
"@lezer/generator": "^1.7.0",
@ -2027,9 +2027,9 @@
"license": "MIT"
},
"node_modules/@codemirror/autocomplete": {
"version": "6.16.2",
"resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.16.2.tgz",
"integrity": "sha512-MjfDrHy0gHKlPWsvSsikhO1+BOh+eBHNgfH1OXs1+DAf30IonQldgMM3kxLDTG9ktE7kDLaA1j/l7KMPA4KNfw==",
"version": "6.17.0",
"resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz",
"integrity": "sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==",
"dependencies": {
"@codemirror/language": "^6.0.0",
"@codemirror/state": "^6.0.0",
@ -4199,9 +4199,9 @@
"license": "MIT"
},
"node_modules/@types/node": {
"version": "20.14.2",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.2.tgz",
"integrity": "sha512-xyu6WAMVwv6AKFLB+e/7ySZVr/0zLCzOa7rSpq6jNwpqOrUbcACDWC+53d4n2QHOnDou0fbIsg8wZu/sxrnI4Q==",
"version": "20.14.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.9.tgz",
"integrity": "sha512-06OCtnTXtWOZBJlRApleWndH4JsRVs1pDCc8dLSQp+7PpUpX3ePdHyeNSFTeSe7FtKyQkrlPvHwJOW3SLd8Oyg==",
"dependencies": {
"undici-types": "~5.26.4"
}
@ -16807,9 +16807,9 @@
"license": "CC0-1.0"
},
"node_modules/sass": {
"version": "1.77.4",
"resolved": "https://registry.npmjs.org/sass/-/sass-1.77.4.tgz",
"integrity": "sha512-vcF3Ckow6g939GMA4PeU7b2K/9FALXk2KF9J87txdHzXbUF9XRQRwSxcAs/fGaTnJeBFd7UoV22j3lzMLdM0Pw==",
"version": "1.77.6",
"resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz",
"integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==",
"dependencies": {
"chokidar": ">=3.0.0 <4.0.0",
"immutable": "^4.0.0",
@ -18028,12 +18028,13 @@
"license": "MIT"
},
"node_modules/ts-jest": {
"version": "29.1.4",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.4.tgz",
"integrity": "sha512-YiHwDhSvCiItoAgsKtoLFCuakDzDsJ1DLDnSouTaTmdOcOwIkSzbLXduaQ6M5DRVhuZC/NYaaZ/mtHbWMv/S6Q==",
"version": "29.2.2",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.2.tgz",
"integrity": "sha512-sSW7OooaKT34AAngP6k1VS669a0HdLxkQZnlC7T76sckGCokXFnvJ3yRlQZGRTAoV5K19HfSgCiSwWOSIfcYlg==",
"dev": true,
"dependencies": {
"bs-logger": "0.x",
"ejs": "^3.0.0",
"fast-json-stable-stringify": "2.x",
"jest-util": "^29.0.0",
"json5": "^2.2.3",
@ -19331,9 +19332,9 @@
},
"react-app": {
"name": "@prometheus-io/app",
"version": "0.53.0",
"version": "0.53.1",
"dependencies": {
"@codemirror/autocomplete": "^6.16.2",
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0",
@ -19349,7 +19350,7 @@
"@lezer/lr": "^1.4.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.53.0",
"@prometheus-io/codemirror-promql": "0.53.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",
@ -19368,7 +19369,7 @@
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0",
"sass": "1.77.4",
"sass": "1.77.6",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"
},

View file

@ -17,7 +17,7 @@
},
"devDependencies": {
"@types/jest": "^29.5.12",
"@types/node": "^20.14.2",
"@types/node": "^20.14.9",
"eslint-config-prettier": "^9.1.0",
"eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^4.2.1",
@ -25,8 +25,8 @@
"jest-fetch-mock": "^3.0.3",
"prettier": "^2.8.8",
"react-scripts": "^5.0.1",
"ts-jest": "^29.1.4",
"ts-jest": "^29.2.2",
"typescript": "^4.9.5"
},
"version": "0.53.0"
"version": "0.53.1"
}

View file

@ -1,9 +1,9 @@
{
"name": "@prometheus-io/app",
"version": "0.53.0",
"version": "0.53.1",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.16.2",
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0",
@ -19,7 +19,7 @@
"@lezer/lr": "^1.4.1",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.53.0",
"@prometheus-io/codemirror-promql": "0.53.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",
@ -38,7 +38,7 @@
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0",
"sass": "1.77.4",
"sass": "1.77.6",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"
},