Merge pull request #14488 from jan--f/3.0-main-sync-24-07-18

3.0 main sync 24 07 18
Björn Rabenstein 2024-07-23 16:59:36 +02:00 committed by GitHub
commit 91c05eed84
68 changed files with 2394 additions and 1221 deletions


@@ -13,7 +13,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+   - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
    - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -13,7 +13,7 @@ jobs:
    if: github.repository_owner == 'prometheus'
    steps:
    - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-   - uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
+   - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0
      with:
        github_token: ${{ secrets.GITHUB_TOKEN }}
    - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1


@@ -27,12 +27,12 @@ jobs:
      uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
    - name: Initialize CodeQL
-     uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+     uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
      with:
        languages: ${{ matrix.language }}
    - name: Autobuild
-     uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+     uses: github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11
    - name: Perform CodeQL Analysis
-     uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
+     uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11


@@ -45,6 +45,6 @@ jobs:
      # Upload the results to GitHub's code scanning dashboard.
    - name: "Upload to code-scanning"
-     uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
+     uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11
      with:
        sarif_file: results.sarif


@@ -1,12 +1,5 @@
run:
  timeout: 15m
-  skip-files:
-    # Skip autogenerated files.
-    - ^.*\.(pb|y)\.go$
-  skip-dirs:
-    # Copied it from a different source
-    - storage/remote/otlptranslator/prometheusremotewrite
-    - storage/remote/otlptranslator/prometheus

output:
  sort-results: true

@@ -33,6 +26,13 @@ linters:
issues:
  max-same-issues: 0
+  exclude-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+  exclude-dirs:
+    # Copied it from a different source
+    - storage/remote/otlptranslator/prometheusremotewrite
+    - storage/remote/otlptranslator/prometheus
  exclude-rules:
    - linters:
      - gocritic


@@ -8,6 +8,14 @@ _Please add changes here that are only in the release-3.0 branch. These will be
## unreleased

+## 2.53.1 / 2024-07-10
+
+Fix a bug which would drop samples in remote-write if the sending flow stalled
+for longer than it takes to write one "WAL segment". How long this takes depends on the size
+of your Prometheus; as a rough guide with 10 million series it is about 2-3 minutes.
+
+* [BUGFIX] Remote-write: stop dropping samples in catch-up #14446
+
## 2.53.0 / 2024-06-16

This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that Prometheus operates with minimal additional CPU usage, but greatly reduced memory by adjusting the upstream Go default from 100 to 75.
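For context on what that default means in Go terms, here is a minimal, hypothetical sketch of lowering the GC target when the operator has not set `GOGC` themselves; it is not Prometheus's actual startup code.

```go
package main

import (
	"fmt"
	"os"
	"runtime/debug"
)

func main() {
	// Only override when GOGC is not set explicitly, mirroring the idea of a
	// changed default rather than a forced value. (Illustrative only.)
	if _, ok := os.LookupEnv("GOGC"); !ok {
		previous := debug.SetGCPercent(75)
		fmt.Printf("GC target lowered from %d%% to 75%%\n", previous)
	}
}
```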


@@ -57,7 +57,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
-| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
+| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
+| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |

If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@@ -30,7 +30,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
-| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com' | `.*` |
+| <code class="text-nowrap">--web.cors.origin</code> | Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1\|domain2)\.com' | `.*` |
| <code class="text-nowrap">--storage.tsdb.path</code> | Base path for metrics storage. Use with server mode only. | `data/` |
| <code class="text-nowrap">--storage.tsdb.retention</code> | [DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use "storage.tsdb.retention.time" instead. Use with server mode only. | |
| <code class="text-nowrap">--storage.tsdb.retention.time</code> | How long to retain samples in storage. When this flag is set it overrides "storage.tsdb.retention". If neither this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms. Use with server mode only. | |


@@ -260,7 +260,7 @@ URL query parameters:
  series to return. At least one `match[]` argument must be provided.
- `start=<rfc3339 | unix_timestamp>`: Start timestamp.
- `end=<rfc3339 | unix_timestamp>`: End timestamp.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

You can URL-encode these parameters directly in the request body by using the `POST` method and
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large

@@ -311,7 +311,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
  series from which to read the label names. Optional.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label names.

@@ -362,7 +362,7 @@ URL query parameters:
- `end=<rfc3339 | unix_timestamp>`: End timestamp. Optional.
- `match[]=<series_selector>`: Repeated series selector argument that selects the
  series from which to read the label values. Optional.
-- `limit=<number>`: Maximum number of returned series. Optional.
+- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.

The `data` section of the JSON response is a list of string label values.

@@ -694,6 +694,7 @@ URL query parameters:
- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
+- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
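For illustration, a small hypothetical Go snippet that combines the filters above into a request URL; the parameter names come from this list, the base URL is assumed.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("rule_group[]", "node-alerts")
	q.Add("exclude_alerts", "true")
	q.Add("match[]", `{severity="critical"}`)
	q.Add("match[]", `{team="db"}`)
	fmt.Println("http://localhost:9090/api/v1/rules?" + q.Encode())
}
```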
```json
$ curl http://localhost:9090/api/v1/rules


@@ -82,6 +82,16 @@ Examples:
    -Inf
    NaN

+As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change.
+
+Examples:
+
+    1s # Equivalent to 1.0
+    2m # Equivalent to 120.0
+    1ms # Equivalent to 0.001

## Time series selectors

Time series selectors are responsible for selecting the time series and raw or inferred sample timestamps and values.

@@ -224,6 +234,15 @@ Here are some examples of valid time durations:
    5m
    10s

+As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change.
+
+Examples:
+
+    1.0 # Equivalent to 1s
+    0.001 # Equivalent to 1ms
+    120 # Equivalent to 2m
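The equivalence works in both directions because a duration is simply converted to its length in seconds. A small Go sketch of that conversion; note that plain `time.ParseDuration` does not accept PromQL's `d`, `w`, or `y` units, so this only approximates the PromQL behaviour.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	for _, s := range []string{"1s", "2m", "1ms", "4s180ms"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-8s => %g seconds\n", s, d.Seconds())
	}
}
```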
### Offset modifier

The `offset` modifier allows changing the time offset for individual


@@ -98,8 +98,9 @@ vector.
clamps the sample values of all elements in `v` to have a lower limit of `min` and an upper limit of `max`.

Special cases:
-- Return an empty vector if `min > max`
-- Return `NaN` if `min` or `max` is `NaN`
+* Return an empty vector if `min > max`
+* Return `NaN` if `min` or `max` is `NaN`
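A per-sample sketch of the documented special cases, not the actual PromQL implementation; the boolean stands in for whether the element stays in the output vector.

```go
package main

import (
	"fmt"
	"math"
)

func clamp(v, minVal, maxVal float64) (float64, bool) {
	if math.IsNaN(minVal) || math.IsNaN(maxVal) {
		return math.NaN(), true // NaN limits yield NaN samples
	}
	if minVal > maxVal {
		return 0, false // empty vector: element is dropped
	}
	return math.Max(minVal, math.Min(maxVal, v)), true
}

func main() {
	fmt.Println(clamp(5, 0, 3))           // 3 true
	fmt.Println(clamp(5, 4, 2))           // 0 false
	fmt.Println(clamp(5, math.NaN(), 10)) // NaN true
}
```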
## `clamp_max()`

@@ -349,8 +350,8 @@ a histogram.
Buckets of classic histograms are cumulative. Therefore, the following should always be the case:

-- The counts in the buckets are monotonically increasing (strictly non-decreasing).
-- A lack of observations between the upper limits of two consecutive buckets results in equal counts
+* The counts in the buckets are monotonically increasing (strictly non-decreasing).
+* A lack of observations between the upper limits of two consecutive buckets results in equal counts
  in those two buckets.

However, floating point precision issues (e.g. small discrepancies introduced by computing of buckets

@@ -692,21 +693,21 @@ ignore histogram samples.
The trigonometric functions work in radians:

-- `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
-- `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
-- `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
-- `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
-- `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
-- `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
-- `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
-- `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
-- `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
-- `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
-- `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
-- `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).
+* `acos(v instant-vector)`: calculates the arccosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acos)).
+* `acosh(v instant-vector)`: calculates the inverse hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Acosh)).
+* `asin(v instant-vector)`: calculates the arcsine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asin)).
+* `asinh(v instant-vector)`: calculates the inverse hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Asinh)).
+* `atan(v instant-vector)`: calculates the arctangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atan)).
+* `atanh(v instant-vector)`: calculates the inverse hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Atanh)).
+* `cos(v instant-vector)`: calculates the cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cos)).
+* `cosh(v instant-vector)`: calculates the hyperbolic cosine of all elements in `v` ([special cases](https://pkg.go.dev/math#Cosh)).
+* `sin(v instant-vector)`: calculates the sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sin)).
+* `sinh(v instant-vector)`: calculates the hyperbolic sine of all elements in `v` ([special cases](https://pkg.go.dev/math#Sinh)).
+* `tan(v instant-vector)`: calculates the tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tan)).
+* `tanh(v instant-vector)`: calculates the hyperbolic tangent of all elements in `v` ([special cases](https://pkg.go.dev/math#Tanh)).

The following are useful for converting between degrees and radians:

-- `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
-- `pi()`: returns pi.
-- `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.
+* `deg(v instant-vector)`: converts radians to degrees for all elements in `v`.
+* `pi()`: returns pi.
+* `rad(v instant-vector)`: converts degrees to radians for all elements in `v`.


@@ -137,6 +137,18 @@ will be used.
Expired block cleanup happens in the background. It may take up to two hours
to remove expired blocks. Blocks must be fully expired before they are removed.

+## Right-Sizing Retention Size
+
+If you are utilizing `storage.tsdb.retention.size` to set a size limit, you
+will want to consider the right size for this value relative to the storage you
+have allocated for Prometheus. It is wise to reduce the retention size to provide
+a buffer, ensuring that older entries will be removed before the allocated storage
+for Prometheus becomes full.
+
+At present, we recommend setting the retention size to, at most, 80-85% of your
+allocated Prometheus disk space. This increases the likelihood that older entries
+will be removed prior to hitting any disk limitations.
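A tiny, hypothetical helper that applies the 80% rule of thumb to an allocated disk size; it is not part of Prometheus.

```go
package main

import "fmt"

// recommendedRetentionSize caps the TSDB retention size at roughly 80% of the
// disk allocated to Prometheus, the conservative end of the 80-85% guidance.
func recommendedRetentionSize(allocatedBytes uint64) uint64 {
	return uint64(float64(allocatedBytes) * 0.80)
}

func main() {
	const gib = uint64(1) << 30
	allocated := 500 * gib
	fmt.Printf("--storage.tsdb.retention.size=%dGiB (of %dGiB allocated)\n",
		recommendedRetentionSize(allocated)/gib, allocated/gib)
}
```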
## Remote storage integrations

Prometheus's local storage is limited to a single node's scalability and durability.


@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage

-go 1.21
+go 1.21.0

require (
	github.com/alecthomas/kingpin/v2 v2.4.0
go.mod

@@ -1,6 +1,8 @@
module github.com/prometheus/prometheus

-go 1.21
+go 1.21.0
+
+toolchain go1.22.5

require (
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1

@@ -60,8 +62,8 @@ require (
	github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
	github.com/stretchr/testify v1.9.0
	github.com/vultr/govultr/v2 v2.17.2
-	go.opentelemetry.io/collector/pdata v1.8.0
-	go.opentelemetry.io/collector/semconv v0.101.0
+	go.opentelemetry.io/collector/pdata v1.11.0
+	go.opentelemetry.io/collector/semconv v0.104.0
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
	go.opentelemetry.io/otel v1.27.0
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0

@@ -83,7 +85,7 @@ require (
	google.golang.org/api v0.183.0
	google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
	google.golang.org/grpc v1.64.0
-	google.golang.org/protobuf v1.34.1
+	google.golang.org/protobuf v1.34.2
	gopkg.in/yaml.v2 v2.4.0
	gopkg.in/yaml.v3 v3.0.1
	k8s.io/api v0.29.3

go.sum

@@ -396,8 +396,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=

@@ -723,10 +723,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
-go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
-go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
-go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
+go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE=
+go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE=
+go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k=
+go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=

@@ -1119,8 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@@ -3456,6 +3456,12 @@ func setOffsetForAtModifier(evalTime int64, expr parser.Expr) {
// required for correctness.
func detectHistogramStatsDecoding(expr parser.Expr) {
	parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
+		if n, ok := node.(*parser.BinaryExpr); ok {
+			detectHistogramStatsDecoding(n.LHS)
+			detectHistogramStatsDecoding(n.RHS)
+			return fmt.Errorf("stop")
+		}
+
		n, ok := (node).(*parser.VectorSelector)
		if !ok {
			return nil


@@ -238,11 +238,11 @@ func (q *errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*lab
	return errSeriesSet{err: q.err}
}

-func (*errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

-func (*errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+func (*errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	return nil, nil, nil
}

func (*errQuerier) Close() error { return nil }


@@ -43,7 +43,6 @@ import (
	int      int64
	uint     uint64
	float    float64
-	duration time.Duration
}
@@ -176,8 +175,7 @@ START_METRIC_SELECTOR
%type <int> int
%type <uint> uint
%type <float> number series_value signed_number signed_or_unsigned_number
-%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector
-%type <duration> duration maybe_duration
+%type <node> step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_duration_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector

%start start
@@ -218,7 +216,7 @@ expr :
	| binary_expr
	| function_call
	| matrix_selector
-	| number_literal
+	| number_duration_literal
	| offset_expr
	| paren_expr
	| string_literal
@@ -415,18 +413,22 @@ paren_expr : LEFT_PAREN expr RIGHT_PAREN
 * Offset modifiers.
 */

-offset_expr: expr OFFSET duration
+offset_expr: expr OFFSET number_duration_literal
	{
-		yylex.(*parser).addOffset($1, $3)
+		numLit, _ := $3.(*NumberLiteral)
+		dur := time.Duration(numLit.Val * 1000) * time.Millisecond
+		yylex.(*parser).addOffset($1, dur)
		$$ = $1
	}
-	| expr OFFSET SUB duration
+	| expr OFFSET SUB number_duration_literal
	{
-		yylex.(*parser).addOffset($1, -$4)
+		numLit, _ := $4.(*NumberLiteral)
+		dur := time.Duration(numLit.Val * 1000) * time.Millisecond
+		yylex.(*parser).addOffset($1, -dur)
		$$ = $1
	}
	| expr OFFSET error
-	{ yylex.(*parser).unexpected("offset", "duration"); $$ = $1 }
+	{ yylex.(*parser).unexpected("offset", "number or duration"); $$ = $1 }
	;

/*
 * @ modifiers.

@@ -452,7 +454,7 @@ at_modifier_preprocessors: START | END;
 * Subquery and range selectors.
 */

-matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
+matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
	{
		var errMsg string
		vs, ok := $1.(*VectorSelector)

@@ -469,32 +471,44 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET
			yylex.(*parser).addParseErrf(errRange, errMsg)
		}

+		numLit, _ := $3.(*NumberLiteral)
		$$ = &MatrixSelector{
			VectorSelector: $1.(Expr),
-			Range: $3,
+			Range: time.Duration(numLit.Val * 1000) * time.Millisecond,
			EndPos: yylex.(*parser).lastClosing,
		}
	}
	;

-subquery_expr : expr LEFT_BRACKET duration COLON maybe_duration RIGHT_BRACKET
+subquery_expr : expr LEFT_BRACKET number_duration_literal COLON number_duration_literal RIGHT_BRACKET
	{
+		numLitRange, _ := $3.(*NumberLiteral)
+		numLitStep, _ := $5.(*NumberLiteral)
		$$ = &SubqueryExpr{
			Expr: $1.(Expr),
-			Range: $3,
-			Step: $5,
+			Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
+			Step: time.Duration(numLitStep.Val * 1000) * time.Millisecond,
			EndPos: $6.Pos + 1,
		}
	}
+	| expr LEFT_BRACKET number_duration_literal COLON RIGHT_BRACKET
+	{
+		numLitRange, _ := $3.(*NumberLiteral)
+		$$ = &SubqueryExpr{
+			Expr: $1.(Expr),
+			Range: time.Duration(numLitRange.Val * 1000) * time.Millisecond,
+			Step: 0,
+			EndPos: $5.Pos + 1,
+		}
+	}
-	| expr LEFT_BRACKET duration COLON duration error
+	| expr LEFT_BRACKET number_duration_literal COLON number_duration_literal error
	{ yylex.(*parser).unexpected("subquery selector", "\"]\""); $$ = $1 }
-	| expr LEFT_BRACKET duration COLON error
-	{ yylex.(*parser).unexpected("subquery selector", "duration or \"]\""); $$ = $1 }
+	| expr LEFT_BRACKET number_duration_literal COLON error
+	{ yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\""); $$ = $1 }
-	| expr LEFT_BRACKET duration error
+	| expr LEFT_BRACKET number_duration_literal error
	{ yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\""); $$ = $1 }
	| expr LEFT_BRACKET error
-	{ yylex.(*parser).unexpected("subquery selector", "duration"); $$ = $1 }
+	{ yylex.(*parser).unexpected("subquery selector", "number or duration"); $$ = $1 }
	;

/*
@@ -866,16 +880,43 @@ match_op : EQL | NEQ | EQL_REGEX | NEQ_REGEX ;
 * Literals.
 */

-number_literal : NUMBER
+number_duration_literal : NUMBER
	{
		$$ = &NumberLiteral{
			Val: yylex.(*parser).number($1.Val),
			PosRange: $1.PositionRange(),
		}
	}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = &NumberLiteral{
Val: dur.Seconds(),
PosRange: $1.PositionRange(),
}
}
	;

-number : NUMBER { $$ = yylex.(*parser).number($1.Val) } ;
+number : NUMBER
{
$$ = yylex.(*parser).number($1.Val)
}
| DURATION
{
var err error
var dur time.Duration
dur, err = parseDuration($1.Val)
if err != nil {
yylex.(*parser).addParseErr($1.PositionRange(), err)
}
$$ = dur.Seconds()
}
;
signed_number : ADD number { $$ = $2 }
	| SUB number { $$ = -$2 }

@@ -897,17 +938,6 @@ int : SUB uint { $$ = -int64($2) }
	| uint { $$ = int64($1) }
	;
-duration : DURATION
-	{
-		var err error
-		$$, err = parseDuration($1.Val)
-		if err != nil {
-			yylex.(*parser).addParseErr($1.PositionRange(), err)
-		}
-	}
-	;
string_literal : STRING
	{
		$$ = &StringLiteral{

@@ -931,11 +961,6 @@ string_identifier : STRING
 * Wrappers for optional arguments.
 */
-maybe_duration : /* empty */
-	{$$ = 0}
-	| duration
-	;
maybe_grouping_labels: /* empty */ { $$ = nil }
	| grouping_labels
	;
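Taken together, these grammar changes mean a bare number is now accepted wherever a duration used to be required, interpreted as a number of seconds. A quick, hypothetical way to see the effect, assuming a Prometheus build that includes this change:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// [300] is treated as a 5m range and "offset 50" as a 50s offset.
	expr, err := parser.ParseExpr(`http_requests_total[300] offset 50`)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(expr.String()) // e.g. http_requests_total[5m] offset 50s
}
```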

(File diff suppressed because it is too large.)


@@ -478,7 +478,7 @@ func lexStatements(l *Lexer) stateFn {
			skipSpaces(l)
		}
		l.bracketOpen = true
-		return lexDuration
+		return lexNumberOrDuration
	case r == ']':
		if !l.bracketOpen {
			return l.errorf("unexpected right bracket %q", r)

@@ -846,18 +846,6 @@ func lexLineComment(l *Lexer) stateFn {
	return lexStatements
}

-func lexDuration(l *Lexer) stateFn {
-	if l.scanNumber() {
-		return l.errorf("missing unit character in duration")
-	}
-	if !acceptRemainingDuration(l) {
-		return l.errorf("bad duration syntax: %q", l.input[l.start:l.pos])
-	}
-	l.backup()
-	l.emit(DURATION)
-	return lexStatements
-}
-
// lexNumber scans a number: decimal, hex, oct or float.
func lexNumber(l *Lexer) stateFn {
	if !l.scanNumber() {

@@ -909,6 +897,7 @@ func acceptRemainingDuration(l *Lexer) bool {
// scanNumber scans numbers of different formats. The scanned Item is
// not necessarily a valid number. This case is caught by the parser.
func (l *Lexer) scanNumber() bool {
+	initialPos := l.pos
	// Modify the digit pattern if the number is hexadecimal.
	digitPattern := "0123456789"
	// Disallow hexadecimal in series descriptions as the syntax is ambiguous.

@@ -980,7 +969,10 @@ func (l *Lexer) scanNumber() bool {
		// Handle digits at the end since we already consumed before this loop.
		l.acceptRun(digitPattern)
	}
+	// Empty string is not a valid number.
+	if l.pos == initialPos {
+		return false
+	}
	// Next thing must not be alphanumeric unless it's the times token
	// for series repetitions.
	if r := l.peek(); (l.seriesDesc && r == 'x') || !isAlphaNumeric(r) {


@@ -2133,6 +2133,115 @@ var testExpr = []struct {
			EndPos: 25,
		},
	},
{
input: `test{a="b"}[5m] OFFSET 3600`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
OriginalOffset: 1 * time.Hour,
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, "a", "b"),
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 27,
},
},
{
input: `foo[3ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 3 * time.Millisecond,
EndPos: 16,
},
},
{
input: `foo[4s180ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 20,
},
},
{
input: `foo[4.18] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 180*time.Millisecond,
EndPos: 17,
},
},
{
input: `foo[4s18ms] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 19,
},
},
{
input: `foo[4.018] @ 2.345`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "foo",
Timestamp: makeInt64Pointer(2345),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "foo"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 3,
},
},
Range: 4*time.Second + 18*time.Millisecond,
EndPos: 18,
},
},
	{
		input: `test{a="b"}[5y] @ 1603774699`,
		expected: &MatrixSelector{

@@ -2152,15 +2261,50 @@ var testExpr = []struct {
			EndPos: 28,
		},
	},
{
input: "test[5]",
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "test",
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "test"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 4,
},
},
Range: 5 * time.Second,
EndPos: 7,
},
},
{
input: `some_metric[5m] @ 1m`,
expected: &MatrixSelector{
VectorSelector: &VectorSelector{
Name: "some_metric",
Timestamp: makeInt64Pointer(60000),
LabelMatchers: []*labels.Matcher{
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some_metric"),
},
PosRange: posrange.PositionRange{
Start: 0,
End: 11,
},
},
Range: 5 * time.Minute,
EndPos: 20,
},
},
	{
		input: `foo[5mm]`,
		fail: true,
-		errMsg: "bad duration syntax: \"5mm\"",
+		errMsg: "bad number or duration syntax: \"5mm\"",
	},
	{
		input: `foo[5m1]`,
		fail: true,
-		errMsg: "bad duration syntax: \"5m1\"",
+		errMsg: "bad number or duration syntax: \"5m1\"",
	},
	{
		input: `foo[5m:1m1]`,

@@ -2194,17 +2338,12 @@ var testExpr = []struct {
	{
		input: `foo[]`,
		fail: true,
-		errMsg: "missing unit character in duration",
+		errMsg: "bad number or duration syntax: \"\"",
	},
	{
-		input: `foo[1]`,
+		input: `foo[-1]`,
		fail: true,
-		errMsg: "missing unit character in duration",
+		errMsg: "bad number or duration syntax: \"\"",
	},
-	{
-		input: `some_metric[5m] OFFSET 1`,
-		fail: true,
-		errMsg: "unexpected number \"1\" in offset, expected duration",
-	},
	{
		input: `some_metric[5m] OFFSET 1mm`,

@@ -2214,18 +2353,13 @@ var testExpr = []struct {
	{
		input: `some_metric[5m] OFFSET`,
		fail: true,
-		errMsg: "unexpected end of input in offset, expected duration",
+		errMsg: "unexpected end of input in offset, expected number or duration",
	},
	{
		input: `some_metric OFFSET 1m[5m]`,
		fail: true,
		errMsg: "1:22: parse error: no offset modifiers allowed before range",
	},
-	{
-		input: `some_metric[5m] @ 1m`,
-		fail: true,
-		errMsg: "1:19: parse error: unexpected duration \"1m\" in @, expected timestamp",
-	},
	{
		input: `some_metric[5m] @`,
		fail: true,

@@ -2910,6 +3044,11 @@ var testExpr = []struct {
		errMsg: "illegal character U+002E '.' in escape sequence",
	},
	// Subquery.
{
input: `foo{bar="baz"}[`,
fail: true,
errMsg: `1:16: parse error: bad number or duration syntax: ""`,
},
	{
		input: `foo{bar="baz"}[10m:6s]`,
		expected: &SubqueryExpr{


@@ -55,6 +55,11 @@ const (
	DefaultMaxSamplesPerQuery = 10000
)

+type TBRun interface {
+	testing.TB
+	Run(string, func(*testing.T)) bool
+}
+
var testStartTime = time.Unix(0, 0).UTC()

// LoadedStorage returns storage with generated data using the provided load statements.

@@ -89,7 +94,7 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp
}

// RunBuiltinTests runs an acceptance test suite against the provided engine.
-func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
+func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
	t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
	parser.EnableExperimentalFunctions = true
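The widened signature stays backward compatible because `*testing.T` already satisfies the new interface. A compile-time sketch; the package path `promql/promqltest` is an assumption.

```go
package promqltest_test

import (
	"testing"

	"github.com/prometheus/prometheus/promql/promqltest"
)

// *testing.T provides testing.TB plus Run(string, func(*testing.T)) bool,
// so existing RunBuiltinTests(t, engine) call sites keep compiling unchanged.
var _ promqltest.TBRun = (*testing.T)(nil)
```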


@@ -10,22 +10,54 @@ eval instant at 10s metric @ 100
  metric{job="1"} 10
  metric{job="2"} 20
+eval instant at 10s metric @ 100s
+  metric{job="1"} 10
+  metric{job="2"} 20
+eval instant at 10s metric @ 1m40s
+  metric{job="1"} 10
+  metric{job="2"} 20
eval instant at 10s metric @ 100 offset 50s
  metric{job="1"} 5
  metric{job="2"} 10
+eval instant at 10s metric @ 100 offset 50
+  metric{job="1"} 5
+  metric{job="2"} 10
eval instant at 10s metric offset 50s @ 100
  metric{job="1"} 5
  metric{job="2"} 10
+eval instant at 10s metric offset 50 @ 100
+  metric{job="1"} 5
+  metric{job="2"} 10
eval instant at 10s metric @ 0 offset -50s
  metric{job="1"} 5
  metric{job="2"} 10
+eval instant at 10s metric @ 0 offset -50
+  metric{job="1"} 5
+  metric{job="2"} 10
eval instant at 10s metric offset -50s @ 0
  metric{job="1"} 5
  metric{job="2"} 10
+eval instant at 10s metric offset -50 @ 0
+  metric{job="1"} 5
+  metric{job="2"} 10
+eval instant at 10s metric @ 0 offset -50s
+  metric{job="1"} 5
+  metric{job="2"} 10
+eval instant at 10s metric @ 0 offset -50
+  metric{job="1"} 5
+  metric{job="2"} 10
eval instant at 10s -metric @ 100
  {job="1"} -10
  {job="2"} -20

@@ -48,6 +80,12 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100 offset 50s)
eval instant at 25s sum_over_time(metric{job="1"}[100s] offset 50s @ 100)
  {job="1"} 15
+eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100 offset 50)
+  {job="1"} 15
+eval instant at 25s sum_over_time(metric{job="1"}[100] offset 50s @ 100)
+  {job="1"} 15
# Different timestamps.
eval instant at 25s metric{job="1"} @ 50 + metric{job="1"} @ 100
  {job="1"} 15

@@ -58,6 +96,9 @@ eval instant at 25s rate(metric{job="1"}[100s] @ 100) + label_replace(rate(metri
eval instant at 25s sum_over_time(metric{job="1"}[100s] @ 100) + label_replace(sum_over_time(metric{job="2"}[100s] @ 100), "job", "1", "", "")
  {job="1"} 165
+eval instant at 25s sum_over_time(metric{job="1"}[100] @ 100) + label_replace(sum_over_time(metric{job="2"}[100] @ 100), "job", "1", "", "")
+  {job="1"} 165
# Subqueries.
# 10*(1+2+...+9) + 10.

@@ -72,6 +113,10 @@ eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] @ 100 offset 20s)
eval instant at 25s sum_over_time(metric{job="1"}[100s:1s] offset 20s @ 100)
  {job="1"} 288
+# 10*(1+2+...+7) + 8.
+eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100)
+  {job="1"} 288
# Subquery with different timestamps.
# Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries.


@@ -12,6 +12,11 @@ eval instant at 50m resets(http_requests[10m])
  {path="/bar"} 0
  {path="/biz"} 0
+eval instant at 50m resets(http_requests[600])
+  {path="/foo"} 0
+  {path="/bar"} 0
+  {path="/biz"} 0
eval instant at 50m resets(http_requests[20m])
  {path="/foo"} 1
  {path="/bar"} 0

@@ -250,10 +255,16 @@ eval instant at 50m deriv(testcounter_reset_middle[100m])
eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600)
  {} 70
+eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h)
+  {} 70
# intercept at t = 3000+3600 = 6600
eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
  {} 76.81818181818181
+eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h)
+  {} 76.81818181818181
# intercept at t = 600+3600 = 4200
eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600)
  {} 51.36363636363637


@@ -73,22 +73,32 @@ eval instant at 50m histogram_count(testhistogram3)
  {start="positive"} 110
  {start="negative"} 20
+# Classic way of accessing the count still works.
+eval instant at 50m testhistogram3_count
+  testhistogram3_count{start="positive"} 110
+  testhistogram3_count{start="negative"} 20
# Test histogram_sum.
eval instant at 50m histogram_sum(testhistogram3)
  {start="positive"} 330
  {start="negative"} 80
+# Classic way of accessing the sum still works.
+eval instant at 50m testhistogram3_sum
+  testhistogram3_sum{start="positive"} 330
+  testhistogram3_sum{start="negative"} 80
-# Test histogram_avg.
+# Test histogram_avg. This has no classic equivalent.
eval instant at 50m histogram_avg(testhistogram3)
  {start="positive"} 3
  {start="negative"} 4
-# Test histogram_stddev.
+# Test histogram_stddev. This has no classic equivalent.
eval instant at 50m histogram_stddev(testhistogram3)
  {start="positive"} 2.8189265757336734
  {start="negative"} 4.182715937754936
-# Test histogram_stdvar.
+# Test histogram_stdvar. This has no classic equivalent.
eval instant at 50m histogram_stdvar(testhistogram3)
  {start="positive"} 7.946347039377573
  {start="negative"} 17.495112615949154

@@ -103,137 +113,279 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m]))
  {start="positive"} 0.6363636363636364
  {start="negative"} 0
+# In the classic histogram, we can access the corresponding bucket (if
+# it exists) and divide by the count to get the same result.
+eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count
+  {start="positive"} 0.6363636363636364
+eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m])
+  {start="positive"} 0.6363636363636364
-# Test histogram_quantile.
+# Test histogram_quantile, native and classic.
+eval instant at 50m histogram_quantile(0, testhistogram3)
+  {start="positive"} 0
+  {start="negative"} -0.25
eval instant at 50m histogram_quantile(0, testhistogram3_bucket)
  {start="positive"} 0
  {start="negative"} -0.25
+eval instant at 50m histogram_quantile(0.25, testhistogram3)
+  {start="positive"} 0.055
+  {start="negative"} -0.225
eval instant at 50m histogram_quantile(0.25, testhistogram3_bucket)
  {start="positive"} 0.055
  {start="negative"} -0.225
+eval instant at 50m histogram_quantile(0.5, testhistogram3)
+  {start="positive"} 0.125
+  {start="negative"} -0.2
eval instant at 50m histogram_quantile(0.5, testhistogram3_bucket)
  {start="positive"} 0.125
  {start="negative"} -0.2
+eval instant at 50m histogram_quantile(0.75, testhistogram3)
+  {start="positive"} 0.45
+  {start="negative"} -0.15
eval instant at 50m histogram_quantile(0.75, testhistogram3_bucket)
  {start="positive"} 0.45
  {start="negative"} -0.15
+eval instant at 50m histogram_quantile(1, testhistogram3)
+  {start="positive"} 1
+  {start="negative"} -0.1
eval instant at 50m histogram_quantile(1, testhistogram3_bucket)
  {start="positive"} 1
  {start="negative"} -0.1
# Quantile too low.
+eval_warn instant at 50m histogram_quantile(-0.1, testhistogram)
+  {start="positive"} -Inf
+  {start="negative"} -Inf
eval_warn instant at 50m histogram_quantile(-0.1, testhistogram_bucket)
  {start="positive"} -Inf
  {start="negative"} -Inf
# Quantile too high.
+eval_warn instant at 50m histogram_quantile(1.01, testhistogram)
+  {start="positive"} +Inf
+  {start="negative"} +Inf
eval_warn instant at 50m histogram_quantile(1.01, testhistogram_bucket)
  {start="positive"} +Inf
  {start="negative"} +Inf
# Quantile invalid.
+eval_warn instant at 50m histogram_quantile(NaN, testhistogram)
+  {start="positive"} NaN
+  {start="negative"} NaN
eval_warn instant at 50m histogram_quantile(NaN, testhistogram_bucket)
  {start="positive"} NaN
  {start="negative"} NaN
# Quantile value in lowest bucket.
+eval instant at 50m histogram_quantile(0, testhistogram)
+  {start="positive"} 0
+  {start="negative"} -0.2
eval instant at 50m histogram_quantile(0, testhistogram_bucket)
  {start="positive"} 0
  {start="negative"} -0.2
# Quantile value in highest bucket.
+eval instant at 50m histogram_quantile(1, testhistogram)
+  {start="positive"} 1
+  {start="negative"} 0.3
eval instant at 50m histogram_quantile(1, testhistogram_bucket)
  {start="positive"} 1
  {start="negative"} 0.3
# Finally some useful quantiles.
+eval instant at 50m histogram_quantile(0.2, testhistogram)
+  {start="positive"} 0.048
+  {start="negative"} -0.2
eval instant at 50m histogram_quantile(0.2, testhistogram_bucket)
  {start="positive"} 0.048
  {start="negative"} -0.2
+eval instant at 50m histogram_quantile(0.5, testhistogram)
+  {start="positive"} 0.15
+  {start="negative"} -0.15
eval instant at 50m histogram_quantile(0.5, testhistogram_bucket)
  {start="positive"} 0.15
  {start="negative"} -0.15
+eval instant at 50m histogram_quantile(0.8, testhistogram)
+  {start="positive"} 0.72
+  {start="negative"} 0.3
eval instant at 50m histogram_quantile(0.8, testhistogram_bucket)
  {start="positive"} 0.72
  {start="negative"} 0.3
# More realistic with rates.
+eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m]))
+  {start="positive"} 0.048
+  {start="negative"} -0.2
eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m]))
  {start="positive"} 0.048
  {start="negative"} -0.2
+eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m]))
+  {start="positive"} 0.15
+  {start="negative"} -0.15
eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m]))
  {start="positive"} 0.15
  {start="negative"} -0.15
+eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m]))
+  {start="positive"} 0.72
+  {start="negative"} 0.3
eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m]))
  {start="positive"} 0.72
  {start="negative"} 0.3
# Want results exactly in the middle of the bucket.
+eval instant at 7m histogram_quantile(1./6., testhistogram2)
+  {} 1
eval instant at 7m histogram_quantile(1./6., testhistogram2_bucket)
  {} 1
+eval instant at 7m histogram_quantile(0.5, testhistogram2)
+  {} 3
eval instant at 7m histogram_quantile(0.5, testhistogram2_bucket)
  {} 3
+eval instant at 7m histogram_quantile(5./6., testhistogram2)
+  {} 5
eval instant at 7m histogram_quantile(5./6., testhistogram2_bucket)
  {} 5
+eval instant at 47m histogram_quantile(1./6., rate(testhistogram2[15m]))
+  {} 1
eval instant at 47m histogram_quantile(1./6., rate(testhistogram2_bucket[15m]))
  {} 1
+eval instant at 47m histogram_quantile(0.5, rate(testhistogram2[15m]))
+  {} 3
eval instant at 47m histogram_quantile(0.5, rate(testhistogram2_bucket[15m]))
  {} 3
+eval instant at 47m histogram_quantile(5./6., rate(testhistogram2[15m]))
+  {} 5
eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m]))
  {} 5
-# Aggregated histogram: Everything in one.
+# Aggregated histogram: Everything in one. Note how native histograms
+# don't require aggregation by le.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])))
+  {} 0.075
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.075
+eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])))
+  {} 0.1277777777777778
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.1277777777777778
# Aggregated histogram: Everything in one. Now with avg, which does not change anything.
+eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m])))
+  {} 0.075
eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.075
+eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m])))
+  {} 0.12777777777777778
eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le))
  {} 0.12777777777777778
# Aggregated histogram: By instance.
+eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance))
+  {instance="ins1"} 0.075
+  {instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.075 {instance="ins1"} 0.075
{instance="ins2"} 0.075 {instance="ins2"} 0.075
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance))
{instance="ins1"} 0.1333333333
{instance="ins2"} 0.125
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance))
{instance="ins1"} 0.1333333333 {instance="ins1"} 0.1333333333
{instance="ins2"} 0.125 {instance="ins2"} 0.125
# Aggregated histogram: By job. # Aggregated histogram: By job.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job))
{job="job1"} 0.1
{job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.1 {job="job1"} 0.1
{job="job2"} 0.0642857142857143 {job="job2"} 0.0642857142857143
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job))
{job="job1"} 0.14
{job="job2"} 0.1125
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job))
{job="job1"} 0.14 {job="job1"} 0.14
{job="job2"} 0.1125 {job="job2"} 0.1125
# Aggregated histogram: By job and instance. # Aggregated histogram: By job and instance.
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.11 {instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09 {instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06 {instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675 {instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.1166666666666667
eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance))
{instance="ins1", job="job1"} 0.15 {instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.1333333333333333 {instance="ins2", job="job1"} 0.1333333333333333
@ -241,18 +393,31 @@ eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bu
{instance="ins2", job="job2"} 0.1166666666666667 {instance="ins2", job="job2"} 0.1166666666666667
# The unaggregated histogram for comparison. Same result as the previous one. # The unaggregated histogram for comparison. Same result as the previous one.
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m]))
{instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.11 {instance="ins1", job="job1"} 0.11
{instance="ins2", job="job1"} 0.09 {instance="ins2", job="job1"} 0.09
{instance="ins1", job="job2"} 0.06 {instance="ins1", job="job2"} 0.06
{instance="ins2", job="job2"} 0.0675 {instance="ins2", job="job2"} 0.0675
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m]))
{instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667
eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m]))
{instance="ins1", job="job1"} 0.15 {instance="ins1", job="job1"} 0.15
{instance="ins2", job="job1"} 0.13333333333333333 {instance="ins2", job="job1"} 0.13333333333333333
{instance="ins1", job="job2"} 0.1 {instance="ins1", job="job2"} 0.1
{instance="ins2", job="job2"} 0.11666666666666667 {instance="ins2", job="job2"} 0.11666666666666667
# All NHCBs summed into one.
eval instant at 50m sum(request_duration_seconds) eval instant at 50m sum(request_duration_seconds)
{} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}}
@ -303,11 +468,13 @@ load_with_nhcb 5m
eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m])) eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m]))
{instance="ins1", job="job1"} NaN {instance="ins1", job="job1"} NaN
# Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set.
# https://github.com/prometheus/prometheus/issues/9910 # https://github.com/prometheus/prometheus/issues/9910
load_with_nhcb 5m load_with_nhcb 5m
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.1"} 0+1x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="0.2"} 0+3x10
request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10 request_duration_seconds2_bucket{job="job1", instance="ins1", le="+Inf"} 0+4x10
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket$"}) eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"})
eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"})

View file

@ -747,6 +747,9 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4) eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
{} 1 {} 1
eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4)
{} 100
clear clear
# Counter reset only noticeable in a single bucket. # Counter reset only noticeable in a single bucket.

View file

@ -34,6 +34,11 @@ eval instant at 20s count_over_time(metric[10s])
eval instant at 20s count_over_time(metric[20s]) eval instant at 20s count_over_time(metric[20s])
{} 1 {} 1
eval instant at 20s count_over_time(metric[10])
eval instant at 20s count_over_time(metric[20])
{} 1
clear clear

View file

@ -76,6 +76,21 @@ eval instant at 1010s sum_over_time(metric1[30s:10s] offset 3s)
eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s) eval instant at 1010s sum_over_time((metric1)[30s:10s] offset 3s)
{} 297 {} 297
eval instant at 1010s sum_over_time(metric1[30:10] offset 3)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10s] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10] offset 3s)
{} 297
eval instant at 1010s sum_over_time((metric1)[30:10] offset 3)
{} 297
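
The new cases above exercise bare numbers as durations: [30:10] and offset 3 are read as seconds, so they evaluate exactly like [30s:10s] offset 3s. A minimal sketch of probing this with the Go parser package, assuming the parser in this branch accepts unitless durations (metric1 is only the metric name used by the tests):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Bare numbers in range/subquery selectors and offsets are read as seconds,
	// so this should parse the same as sum_over_time(metric1[30s:10s] offset 3s).
	expr, err := parser.ParseExpr(`sum_over_time(metric1[30:10] offset 3)`)
	if err != nil {
		fmt.Println("parse error:", err) // parsers without unitless-duration support reject this
		return
	}
	fmt.Println(expr.String())
}
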
# Nested subqueries # Nested subqueries
eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s])
{} 0.30000000000000004 {} 0.30000000000000004

View file

@ -151,7 +151,42 @@ func (g *Group) Name() string { return g.name }
func (g *Group) File() string { return g.file } func (g *Group) File() string { return g.file }
// Rules returns the group's rules. // Rules returns the group's rules.
func (g *Group) Rules() []Rule { return g.rules } func (g *Group) Rules(matcherSets ...[]*labels.Matcher) []Rule {
if len(matcherSets) == 0 {
return g.rules
}
var rules []Rule
for _, rule := range g.rules {
if matchesMatcherSets(matcherSets, rule.Labels()) {
rules = append(rules, rule)
}
}
return rules
}
func matches(lbls labels.Labels, matchers ...*labels.Matcher) bool {
for _, m := range matchers {
if v := lbls.Get(m.Name); !m.Matches(v) {
return false
}
}
return true
}
// matchesMatcherSets ensures all matchers within each matcher set are ANDed and the sets themselves are ORed.
func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) bool {
if len(matcherSets) == 0 {
return true
}
var ok bool
for _, matchers := range matcherSets {
if matches(lbls, matchers...) {
ok = true
}
}
return ok
}
// Queryable returns the group's queryable. // Queryable returns the group's queryable.
func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable } func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable }
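
The change above lets Group.Rules (and Manager.Rules further down) take optional matcher sets: matchers inside one set are ANDed against a rule's labels, and the sets are ORed. A minimal caller-side sketch; the helper name and the label values are illustrative only, and mgr is assumed to be an already configured *rules.Manager:

import (
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/rules"
)

// filteredRules returns rules labelled either {team="db"} or
// {severity="critical", env="prod"}; with no matcher sets, all rules come back.
func filteredRules(mgr *rules.Manager) []rules.Rule {
	matcherSets := [][]*labels.Matcher{
		{labels.MustNewMatcher(labels.MatchEqual, "team", "db")},
		{
			labels.MustNewMatcher(labels.MatchEqual, "severity", "critical"),
			labels.MustNewMatcher(labels.MatchEqual, "env", "prod"),
		},
	}
	return mgr.Rules(matcherSets...)
}
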

View file

@ -380,13 +380,13 @@ func (m *Manager) RuleGroups() []*Group {
} }
// Rules returns the list of the manager's rules. // Rules returns the list of the manager's rules.
func (m *Manager) Rules() []Rule { func (m *Manager) Rules(matcherSets ...[]*labels.Matcher) []Rule {
m.mtx.RLock() m.mtx.RLock()
defer m.mtx.RUnlock() defer m.mtx.RUnlock()
var rules []Rule var rules []Rule
for _, g := range m.groups { for _, g := range m.groups {
rules = append(rules, g.rules...) rules = append(rules, g.Rules(matcherSets...)...)
} }
return rules return rules

View file

@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Install Go - name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with: with:

View file

@ -238,11 +238,11 @@ func (errQuerier) Select(context.Context, bool, *storage.SelectHints, ...*labels
return storage.ErrSeriesSet(errSelect) return storage.ErrSeriesSet(errSelect)
} }
func (errQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (errQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label values error") return nil, nil, errors.New("label values error")
} }
func (errQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (errQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, errors.New("label names error") return nil, nil, errors.New("label names error")
} }

View file

@ -122,11 +122,11 @@ type MockQuerier struct {
SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet SelectMockFunction func(sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet
} }
func (q *MockQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *MockQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (q *MockQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *MockQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
@ -161,12 +161,12 @@ type LabelQuerier interface {
// It is not safe to use the strings beyond the lifetime of the querier. // It is not safe to use the strings beyond the lifetime of the querier.
// If matchers are specified the returned result set is reduced // If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers. // to label values of metrics matching the matchers.
LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// LabelNames returns all the unique label names present in the block in sorted order. // LabelNames returns all the unique label names present in the block in sorted order.
// If matchers are specified the returned result set is reduced // If matchers are specified the returned result set is reduced
// to label names of metrics matching the matchers. // to label names of metrics matching the matchers.
LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error)
// Close releases the resources of the Querier. // Close releases the resources of the Querier.
Close() error Close() error
@ -190,6 +190,9 @@ type SelectHints struct {
Start int64 // Start time in milliseconds for this select. Start int64 // Start time in milliseconds for this select.
End int64 // End time in milliseconds for this select. End int64 // End time in milliseconds for this select.
// Maximum number of results returned. Use a value of 0 to disable.
Limit int
Step int64 // Query step size in milliseconds. Step int64 // Query step size in milliseconds.
Func string // String representation of surrounding function or aggregation. Func string // String representation of surrounding function or aggregation.
@ -217,6 +220,13 @@ type SelectHints struct {
DisableTrimming bool DisableTrimming bool
} }
// LabelHints specifies hints passed for label reads.
// This is used only as an option for implementation to use.
type LabelHints struct {
// Maximum number of results returned. Use a value of 0 to disable.
Limit int
}
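
LabelHints is advisory: an implementation may use Limit to truncate label results early, and callers must not assume the limit was enforced. A minimal querier-side sketch of honouring the hint; the helper name is illustrative, not part of this change:

import "github.com/prometheus/prometheus/storage"

// applyLabelLimit truncates an already sorted label result according to the
// optional hint; a nil hint or a Limit of 0 means "no limit".
func applyLabelLimit(names []string, hints *storage.LabelHints) []string {
	if hints == nil || hints.Limit <= 0 || len(names) <= hints.Limit {
		return names
	}
	return names[:hints.Limit]
}
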
// TODO(bwplotka): Move to promql/engine_test.go? // TODO(bwplotka): Move to promql/engine_test.go?
// QueryableFunc is an adapter to allow the use of ordinary functions as // QueryableFunc is an adapter to allow the use of ordinary functions as
// Queryables. It follows the idea of http.HandlerFunc. // Queryables. It follows the idea of http.HandlerFunc.

View file

@ -136,6 +136,11 @@ func (b *MemoizedSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHist
return b.it.AtFloatHistogram(nil) return b.it.AtFloatHistogram(nil)
} }
// AtT returns the timestamp of the current element of the iterator.
func (b *MemoizedSeriesIterator) AtT() int64 {
return b.it.AtT()
}
// Err returns the last encountered error. // Err returns the last encountered error.
func (b *MemoizedSeriesIterator) Err() error { func (b *MemoizedSeriesIterator) Err() error {
return b.it.Err() return b.it.Err()

View file

@ -29,13 +29,15 @@ func TestMemoizedSeriesIterator(t *testing.T) {
sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) { sampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram) {
if efh == nil { if efh == nil {
ts, v := it.At() ts, v := it.At()
require.Equal(t, ets, ts, "timestamp mismatch") require.Equal(t, ets, ts, "At() timestamp mismatch")
require.Equal(t, ev, v, "value mismatch") require.Equal(t, ev, v, "At() value mismatch")
} else { } else {
ts, fh := it.AtFloatHistogram() ts, fh := it.AtFloatHistogram()
require.Equal(t, ets, ts, "timestamp mismatch") require.Equal(t, ets, ts, "AtFloatHistogram() timestamp mismatch")
require.Equal(t, efh, fh, "histogram mismatch") require.Equal(t, efh, fh, "AtFloatHistogram() histogram mismatch")
} }
require.Equal(t, ets, it.AtT(), "AtT() timestamp mismatch")
} }
prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) { prevSampleEq := func(ets int64, ev float64, efh *histogram.FloatHistogram, eok bool) {
ts, v, fh, ok := it.PeekPrev() ts, v, fh, ok := it.PeekPrev()

View file

@ -169,8 +169,8 @@ func (l labelGenericQueriers) SplitByHalf() (labelGenericQueriers, labelGenericQ
// LabelValues returns all potential values for a label name. // LabelValues returns all potential values for a label name.
// If matchers are specified the returned result set is reduced // If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers. // to label values of metrics matching the matchers.
func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
res, ws, err := q.lvals(ctx, q.queriers, name, matchers...) res, ws, err := q.lvals(ctx, q.queriers, name, hints, matchers...)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err) return nil, nil, fmt.Errorf("LabelValues() from merge generic querier for label %s: %w", name, err)
} }
@ -178,22 +178,22 @@ func (q *mergeGenericQuerier) LabelValues(ctx context.Context, name string, matc
} }
// lvals performs merge sort for LabelValues from multiple queriers. // lvals performs merge sort for LabelValues from multiple queriers.
func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *mergeGenericQuerier) lvals(ctx context.Context, lq labelGenericQueriers, n string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
if lq.Len() == 0 { if lq.Len() == 0 {
return nil, nil, nil return nil, nil, nil
} }
if lq.Len() == 1 { if lq.Len() == 1 {
return lq.Get(0).LabelValues(ctx, n, matchers...) return lq.Get(0).LabelValues(ctx, n, hints, matchers...)
} }
a, b := lq.SplitByHalf() a, b := lq.SplitByHalf()
var ws annotations.Annotations var ws annotations.Annotations
s1, w, err := q.lvals(ctx, a, n, matchers...) s1, w, err := q.lvals(ctx, a, n, hints, matchers...)
ws.Merge(w) ws.Merge(w)
if err != nil { if err != nil {
return nil, ws, err return nil, ws, err
} }
s2, ws, err := q.lvals(ctx, b, n, matchers...) s2, ws, err := q.lvals(ctx, b, n, hints, matchers...)
ws.Merge(w) ws.Merge(w)
if err != nil { if err != nil {
return nil, ws, err return nil, ws, err
@ -229,13 +229,13 @@ func mergeStrings(a, b []string) []string {
} }
// LabelNames returns all the unique label names present in all queriers in sorted order. // LabelNames returns all the unique label names present in all queriers in sorted order.
func (q *mergeGenericQuerier) LabelNames(ctx context.Context, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *mergeGenericQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
var ( var (
labelNamesMap = make(map[string]struct{}) labelNamesMap = make(map[string]struct{})
warnings annotations.Annotations warnings annotations.Annotations
) )
for _, querier := range q.queriers { for _, querier := range q.queriers {
names, wrn, err := querier.LabelNames(ctx, matchers...) names, wrn, err := querier.LabelNames(ctx, hints, matchers...)
if wrn != nil { if wrn != nil {
// TODO(bwplotka): We could potentially wrap warnings. // TODO(bwplotka): We could potentially wrap warnings.
warnings.Merge(wrn) warnings.Merge(wrn)

View file

@ -1361,7 +1361,7 @@ func (m *mockGenericQuerier) Select(_ context.Context, b bool, _ *SelectHints, _
return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err} return &mockGenericSeriesSet{resp: m.resp, warnings: m.warnings, err: m.err}
} }
func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock() m.mtx.Lock()
m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{ m.labelNamesRequested = append(m.labelNamesRequested, labelNameRequest{
name: name, name: name,
@ -1371,7 +1371,7 @@ func (m *mockGenericQuerier) LabelValues(_ context.Context, name string, matcher
return m.resp, m.warnings, m.err return m.resp, m.warnings, m.err
} }
func (m *mockGenericQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (m *mockGenericQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
m.mtx.Lock() m.mtx.Lock()
m.labelNamesCalls++ m.labelNamesCalls++
m.mtx.Unlock() m.mtx.Unlock()
@ -1558,7 +1558,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
} }
}) })
t.Run("LabelNames", func(t *testing.T) { t.Run("LabelNames", func(t *testing.T) {
res, w, err := q.LabelNames(ctx) res, w, err := q.LabelNames(ctx, nil)
require.Subset(t, tcase.expectedWarnings, w) require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match") require.ErrorIs(t, err, tcase.expectedErrs[1], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res) require.Equal(t, tcase.expectedLabels, res)
@ -1573,7 +1573,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
} }
}) })
t.Run("LabelValues", func(t *testing.T) { t.Run("LabelValues", func(t *testing.T) {
res, w, err := q.LabelValues(ctx, "test") res, w, err := q.LabelValues(ctx, "test", nil)
require.Subset(t, tcase.expectedWarnings, w) require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match") require.ErrorIs(t, err, tcase.expectedErrs[2], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res) require.Equal(t, tcase.expectedLabels, res)
@ -1589,7 +1589,7 @@ func TestMergeGenericQuerierWithSecondaries_ErrorHandling(t *testing.T) {
}) })
t.Run("LabelValuesWithMatchers", func(t *testing.T) { t.Run("LabelValuesWithMatchers", func(t *testing.T) {
matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue") matcher := labels.MustNewMatcher(labels.MatchEqual, "otherLabel", "someValue")
res, w, err := q.LabelValues(ctx, "test2", matcher) res, w, err := q.LabelValues(ctx, "test2", nil, matcher)
require.Subset(t, tcase.expectedWarnings, w) require.Subset(t, tcase.expectedWarnings, w)
require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match") require.ErrorIs(t, err, tcase.expectedErrs[3], "expected error doesn't match")
require.Equal(t, tcase.expectedLabels, res) require.Equal(t, tcase.expectedLabels, res)

View file

@ -31,11 +31,11 @@ func (noopQuerier) Select(context.Context, bool, *SelectHints, ...*labels.Matche
return NoopSeriesSet() return NoopSeriesSet()
} }
func (noopQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (noopQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
@ -54,11 +54,11 @@ func (noopChunkQuerier) Select(context.Context, bool, *SelectHints, ...*labels.M
return NoopChunkedSeriesSet() return NoopChunkedSeriesSet()
} }
func (noopChunkQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopChunkQuerier) LabelValues(context.Context, string, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }
func (noopChunkQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (noopChunkQuerier) LabelNames(context.Context, *LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, nil return nil, nil, nil
} }

View file

@ -36,7 +36,8 @@ import (
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
) )
var testHistogram = histogram.Histogram{ var (
testHistogram = histogram.Histogram{
Schema: 2, Schema: 2,
ZeroThreshold: 1e-128, ZeroThreshold: 1e-128,
ZeroCount: 0, ZeroCount: 0,
@ -48,7 +49,7 @@ var testHistogram = histogram.Histogram{
NegativeBuckets: []int64{-1}, NegativeBuckets: []int64{-1},
} }
var writeRequestFixture = &prompb.WriteRequest{ writeRequestFixture = &prompb.WriteRequest{
Timeseries: []prompb.TimeSeries{ Timeseries: []prompb.TimeSeries{
{ {
Labels: []prompb.Label{ Labels: []prompb.Label{
@ -58,9 +59,9 @@ var writeRequestFixture = &prompb.WriteRequest{
{Name: "d", Value: "e"}, {Name: "d", Value: "e"},
{Name: "foo", Value: "bar"}, {Name: "foo", Value: "bar"},
}, },
Samples: []prompb.Sample{{Value: 1, Timestamp: 0}}, Samples: []prompb.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 0}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "f", Value: "g"}}, Value: 1, Timestamp: 1}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(0, &testHistogram), prompb.FromFloatHistogram(1, testHistogram.ToFloat(nil))}, Histograms: []prompb.Histogram{prompb.FromIntHistogram(1, &testHistogram), prompb.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
}, },
{ {
Labels: []prompb.Label{ Labels: []prompb.Label{
@ -70,14 +71,13 @@ var writeRequestFixture = &prompb.WriteRequest{
{Name: "d", Value: "e"}, {Name: "d", Value: "e"},
{Name: "foo", Value: "bar"}, {Name: "foo", Value: "bar"},
}, },
Samples: []prompb.Sample{{Value: 2, Timestamp: 1}}, Samples: []prompb.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 1}}, Exemplars: []prompb.Exemplar{{Labels: []prompb.Label{{Name: "h", Value: "i"}}, Value: 2, Timestamp: 2}},
Histograms: []prompb.Histogram{prompb.FromIntHistogram(2, &testHistogram), prompb.FromFloatHistogram(3, testHistogram.ToFloat(nil))}, Histograms: []prompb.Histogram{prompb.FromIntHistogram(3, &testHistogram), prompb.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
}, },
}, },
} }
var (
writeV2RequestSeries1Metadata = metadata.Metadata{ writeV2RequestSeries1Metadata = metadata.Metadata{
Type: model.MetricTypeGauge, Type: model.MetricTypeGauge,
Help: "Test gauge for test purposes", Help: "Test gauge for test purposes",
@ -88,42 +88,77 @@ var (
Help: "Test counter for test purposes", Help: "Test counter for test purposes",
} }
// writeV2RequestFixture represents the same request as writeRequestFixture, but using the v2 representation. // writeV2RequestFixture represents the same request as writeRequestFixture,
writeV2RequestFixture = func() *writev2.Request { // but using the v2 representation, plus includes writeV2RequestSeries1Metadata and writeV2RequestSeries2Metadata.
// NOTE: Use TestWriteV2RequestFixture and copy the diff to regenerate if needed.
writeV2RequestFixture = &writev2.Request{
Symbols: []string{"", "__name__", "test_metric1", "b", "c", "baz", "qux", "d", "e", "foo", "bar", "f", "g", "h", "i", "Test gauge for test purposes", "Maybe op/sec who knows (:", "Test counter for test purposes"},
Timeseries: []writev2.TimeSeries{
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Symbolized writeRequestFixture.Timeseries[0].Labels
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // writeV2RequestSeries1Metadata.Type.
HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
},
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // writeV2RequestSeries2Metadata.Type.
HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help.
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
},
},
}
)
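
In the v2 request, label and metadata strings are referenced by index into the Symbols table: the LabelsRefs {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} above decode pairwise into __name__="test_metric1", b="c", baz="qux", d="e", foo="bar", and HelpRef 15 points at the symbolized help text. A small sketch of decoding such references by hand, following the pairwise name/value convention the fixture relies on:

// decodeLabels turns pairwise symbol references into label name/value pairs,
// e.g. refs {1, 2} with the fixture's symbol table yields __name__="test_metric1".
func decodeLabels(refs []uint32, symbols []string) map[string]string {
	out := make(map[string]string, len(refs)/2)
	for i := 0; i+1 < len(refs); i += 2 {
		out[symbols[refs[i]]] = symbols[refs[i+1]]
	}
	return out
}
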
func TestWriteV2RequestFixture(t *testing.T) {
// Generate dynamically writeV2RequestFixture, reusing v1 fixture elements.
st := writev2.NewSymbolTable() st := writev2.NewSymbolTable()
b := labels.NewScratchBuilder(0) b := labels.NewScratchBuilder(0)
labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil) labelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].ToLabels(&b, nil), nil)
exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) exemplar1LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[0].Exemplars[0].ToExemplar(&b, nil).Labels, nil) exemplar2LabelRefs := st.SymbolizeLabels(writeRequestFixture.Timeseries[1].Exemplars[0].ToExemplar(&b, nil).Labels, nil)
return &writev2.Request{ expected := &writev2.Request{
Timeseries: []writev2.TimeSeries{ Timeseries: []writev2.TimeSeries{
{ {
LabelsRefs: labelRefs, LabelsRefs: labelRefs,
Metadata: writev2.Metadata{ Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_GAUGE, // Same as writeV2RequestSeries1Metadata.Type, but in writev2. Type: writev2.Metadata_METRIC_TYPE_GAUGE,
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help), HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit), UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
}, },
Samples: []writev2.Sample{{Value: 1, Timestamp: 0}}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 0}}, Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))}, Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
}, },
{ {
LabelsRefs: labelRefs, LabelsRefs: labelRefs,
Metadata: writev2.Metadata{ Metadata: writev2.Metadata{
Type: writev2.Metadata_METRIC_TYPE_COUNTER, // Same as writeV2RequestSeries2Metadata.Type, but in writev2. Type: writev2.Metadata_METRIC_TYPE_COUNTER,
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help), HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
// No unit. // No unit.
}, },
Samples: []writev2.Sample{{Value: 2, Timestamp: 1}}, Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 1}}, Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(2, &testHistogram), writev2.FromFloatHistogram(3, testHistogram.ToFloat(nil))}, Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
}, },
}, },
Symbols: st.Symbols(), Symbols: st.Symbols(),
} }
}() // Check if it matches static writeV2RequestFixture.
) require.Equal(t, expected, writeV2RequestFixture)
}
func TestValidateLabelsAndMetricName(t *testing.T) { func TestValidateLabelsAndMetricName(t *testing.T) {
tests := []struct { tests := []struct {

View file

@ -39,9 +39,3 @@ func (m *maxTimestamp) Get() float64 {
defer m.mtx.Unlock() defer m.mtx.Unlock()
return m.value return m.value
} }
func (m *maxTimestamp) Collect(c chan<- prometheus.Metric) {
if m.Get() > 0 {
m.Gauge.Collect(c)
}
}

View file

@ -232,7 +232,7 @@ func newQueueManagerMetrics(r prometheus.Registerer, rn, e string) *queueManager
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "queue_highest_sent_timestamp_seconds", Name: "queue_highest_sent_timestamp_seconds",
Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch.", Help: "Timestamp from a WAL sample, the highest timestamp successfully sent by this queue, in seconds since epoch. Initialized to 0 when no data has been sent yet.",
ConstLabels: constLabels, ConstLabels: constLabels,
}), }),
} }
@ -1468,6 +1468,8 @@ func (q *queue) FlushAndShutdown(done <-chan struct{}) {
for q.tryEnqueueingBatch(done) { for q.tryEnqueueingBatch(done) {
time.Sleep(time.Second) time.Sleep(time.Second)
} }
q.batchMtx.Lock()
defer q.batchMtx.Unlock()
q.batch = nil q.batch = nil
close(q.batchQueue) close(q.batchQueue)
} }

View file

@ -60,7 +60,7 @@ func newHighestTimestampMetric() *maxTimestamp {
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "highest_timestamp_in_seconds", Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet",
}), }),
} }
} }

View file

@ -210,13 +210,13 @@ func (q querier) addExternalLabels(ms []*labels.Matcher) ([]*labels.Matcher, []s
} }
// LabelValues implements storage.Querier and is a noop. // LabelValues implements storage.Querier and is a noop.
func (q *querier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *querier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented") return nil, nil, errors.New("not implemented")
} }
// LabelNames implements storage.Querier and is a noop. // LabelNames implements storage.Querier and is a noop.
func (q *querier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (q *querier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
// TODO: Implement: https://github.com/prometheus/prometheus/issues/3351 // TODO: Implement: https://github.com/prometheus/prometheus/issues/3351
return nil, nil, errors.New("not implemented") return nil, nil, errors.New("not implemented")
} }

View file

@ -100,7 +100,7 @@ func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, f
Namespace: namespace, Namespace: namespace,
Subsystem: subsystem, Subsystem: subsystem,
Name: "highest_timestamp_in_seconds", Name: "highest_timestamp_in_seconds",
Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch.", Help: "Highest timestamp that has come into the remote storage via the Appender interface, in seconds since epoch. Initialized to 0 when no data has been received yet.",
}), }),
}, },
} }

View file

@ -19,6 +19,7 @@ import (
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"strconv"
"strings" "strings"
"time" "time"
@ -27,6 +28,7 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
@ -44,6 +46,7 @@ type writeHandler struct {
appendable storage.Appendable appendable storage.Appendable
samplesWithInvalidLabelsTotal prometheus.Counter samplesWithInvalidLabelsTotal prometheus.Counter
samplesAppendedWithoutMetadata prometheus.Counter
acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{} acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
} }
@ -52,6 +55,9 @@ const maxAheadTime = 10 * time.Minute
// NewWriteHandler creates a http.Handler that accepts remote write requests with // NewWriteHandler creates a http.Handler that accepts remote write requests with
// the given message in acceptedProtoMsgs and writes them to the provided appendable. // the given message in acceptedProtoMsgs and writes them to the provided appendable.
//
// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
for _, acc := range acceptedProtoMsgs { for _, acc := range acceptedProtoMsgs {
@ -61,15 +67,18 @@ func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable st
logger: logger, logger: logger,
appendable: appendable, appendable: appendable,
acceptedProtoMsgs: protoMsgs, acceptedProtoMsgs: protoMsgs,
samplesWithInvalidLabelsTotal: prometheus.NewCounter(prometheus.CounterOpts{ samplesWithInvalidLabelsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus", Namespace: "prometheus",
Subsystem: "api", Subsystem: "api",
Name: "remote_write_invalid_labels_samples_total", Name: "remote_write_invalid_labels_samples_total",
Help: "The total number of remote write samples which contains invalid labels.", Help: "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.",
}),
samplesAppendedWithoutMetadata: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "remote_write_without_metadata_appended_samples_total",
Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
}), }),
}
if reg != nil {
reg.MustRegister(h.samplesWithInvalidLabelsTotal)
} }
return h return h
} }
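
The counter creation above now goes through promauto.With(reg), which removes the manual nil check: if reg is nil, the returned factory simply creates metrics without registering them. A rough before/after sketch under that assumption:

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newInvalidLabelsCounter contrasts the two styles; reg may be nil in both.
func newInvalidLabelsCounter(reg prometheus.Registerer) prometheus.Counter {
	opts := prometheus.CounterOpts{
		Namespace: "prometheus",
		Subsystem: "api",
		Name:      "remote_write_invalid_labels_samples_total",
		Help:      "The total number of received remote write samples and histogram samples which were rejected due to invalid labels.",
	}
	// Old style: create, then register only when a registerer was supplied.
	//   c := prometheus.NewCounter(opts)
	//   if reg != nil { reg.MustRegister(c) }
	// New style: promauto.With(nil) yields a factory that skips registration,
	// so the nil check disappears at the call site.
	return promauto.With(reg).NewCounter(opts)
}
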
@ -108,15 +117,15 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
contentType = appProtoContentType contentType = appProtoContentType
} }
msg, err := h.parseProtoMsg(contentType) msgType, err := h.parseProtoMsg(contentType)
if err != nil { if err != nil {
level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err)
http.Error(w, err.Error(), http.StatusUnsupportedMediaType) http.Error(w, err.Error(), http.StatusUnsupportedMediaType)
return return
} }
if _, ok := h.acceptedProtoMsgs[msg]; !ok { if _, ok := h.acceptedProtoMsgs[msgType]; !ok {
err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msg, func() (ret []string) { err := fmt.Errorf("%v protobuf message is not accepted by this server; accepted %v", msgType, func() (ret []string) {
for k := range h.acceptedProtoMsgs { for k := range h.acceptedProtoMsgs {
ret = append(ret, string(k)) ret = append(ret, string(k))
} }
@ -154,100 +163,111 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} }
// Now we have a decompressed buffer we can unmarshal it. // Now we have a decompressed buffer we can unmarshal it.
switch msg {
case config.RemoteWriteProtoMsgV1: if msgType == config.RemoteWriteProtoMsgV1 {
// PRW 1.0 flow has different proto message and no partial write handling.
var req prompb.WriteRequest var req prompb.WriteRequest
if err := proto.Unmarshal(decompressed, &req); err != nil { if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error? // TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msg, "err", err.Error()) level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
} }
err = h.write(r.Context(), &req) if err = h.write(r.Context(), &req); err != nil {
case config.RemoteWriteProtoMsgV2:
var req writev2.Request
if err := proto.Unmarshal(decompressed, &req); err != nil {
// TODO(bwplotka): Add more context to responded error?
level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msg, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
err = h.writeV2(r.Context(), &req)
}
switch { switch {
case err == nil:
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample): case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp), errors.Is(err, storage.ErrTooOldSample):
// Indicated an out of order sample is a bad request to prevent retries. // Indicate that an out-of-order sample is a bad request, to prevent retries.
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
default: default:
level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
}
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
return
} }
// checkAppendExemplarError modifies the AppendExemplar's returned error based on the error cause. // Remote Write 2.x proto message handling.
func (h *writeHandler) checkAppendExemplarError(err error, e exemplar.Exemplar, outOfOrderErrs *int) error { var req writev2.Request
unwrappedErr := errors.Unwrap(err) if err := proto.Unmarshal(decompressed, &req); err != nil {
if unwrappedErr == nil { // TODO(bwplotka): Add more context to responded error?
unwrappedErr = err level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest)
return
} }
switch {
case errors.Is(unwrappedErr, storage.ErrNotFound): respStats, errHTTPCode, err := h.writeV2(r.Context(), &req)
return storage.ErrNotFound
case errors.Is(unwrappedErr, storage.ErrOutOfOrderExemplar): // Set required X-Prometheus-Remote-Write-Written-* response headers, in all cases.
*outOfOrderErrs++ respStats.SetResponseHeaders(w.Header())
level.Debug(h.logger).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e))
return nil if err != nil {
default: if errHTTPCode/5 == 100 { // 5xx
return err level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error())
} }
http.Error(w, err.Error(), errHTTPCode)
return
}
w.WriteHeader(http.StatusNoContent)
} }
func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) { func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err error) {
outOfOrderExemplarErrs := 0 outOfOrderExemplarErrs := 0
samplesWithInvalidLabels := 0 samplesWithInvalidLabels := 0
samplesAppended := 0
timeLimitApp := &timeLimitAppender{ app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx), Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
defer func() { defer func() {
if err != nil { if err != nil {
_ = timeLimitApp.Rollback() _ = app.Rollback()
return return
} }
err = timeLimitApp.Commit() err = app.Commit()
if err != nil {
h.samplesAppendedWithoutMetadata.Add(float64(samplesAppended))
}
}() }()
b := labels.NewScratchBuilder(0) b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, nil) ls := ts.ToLabels(&b, nil)
if !ls.IsValid() { if !ls.Has(labels.MetricName) || !ls.IsValid() {
level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String())
samplesWithInvalidLabels++ samplesWithInvalidLabels++
// TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are
// potentially written. Perhaps unify with fixed writeV2 implementation a bit.
continue continue
} }
err := h.appendSamples(timeLimitApp, ts.Samples, ls) if err := h.appendV1Samples(app, ts.Samples, ls); err != nil {
if err != nil {
return err return err
} }
samplesAppended += len(ts.Samples)
for _, ep := range ts.Exemplars { for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil) e := ep.ToExemplar(&b, nil)
h.appendExemplar(timeLimitApp, e, ls, &outOfOrderExemplarErrs) if _, err := app.AppendExemplar(0, ls, e); err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderExemplar):
outOfOrderExemplarErrs++
level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
default:
// Since exemplar storage is still experimental, we don't fail the request on ingestion errors
level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
}
}
} }
err = h.appendHistograms(timeLimitApp, ts.Histograms, ls) if err = h.appendV1Histograms(app, ts.Histograms, ls); err != nil {
if err != nil {
return err return err
} }
samplesAppended += len(ts.Histograms)
} }
if outOfOrderExemplarErrs > 0 { if outOfOrderExemplarErrs > 0 {
@ -256,151 +276,216 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err
if samplesWithInvalidLabels > 0 { if samplesWithInvalidLabels > 0 {
h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))
} }
return nil return nil
} }
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (err error) { func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
outOfOrderExemplarErrs := 0 var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
timeLimitApp := &timeLimitAppender{ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}
const (
prw20WrittenSamplesHeader = "X-Prometheus-Remote-Write-Written-Samples"
rw20WrittenHistogramsHeader = "X-Prometheus-Remote-Write-Written-Histograms"
rw20WrittenExemplarsHeader = "X-Prometheus-Remote-Write-Written-Exemplars"
)
type responseStats struct {
samples int
histograms int
exemplars int
}
func (s responseStats) SetResponseHeaders(h http.Header) {
h.Set(prw20WrittenSamplesHeader, strconv.Itoa(s.samples))
h.Set(rw20WrittenHistogramsHeader, strconv.Itoa(s.histograms))
h.Set(rw20WrittenExemplarsHeader, strconv.Itoa(s.exemplars))
}
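
These X-Prometheus-Remote-Write-Written-* headers tell a 2.0 sender how much of a (possibly partial) write was accepted. A small sketch of a sender reading them back, assuming resp is the *http.Response of the remote write call; missing headers simply parse as 0:

import (
	"net/http"
	"strconv"
)

// writtenStats reads the per-type acceptance counts set by SetResponseHeaders.
func writtenStats(resp *http.Response) (samples, histograms, exemplars int) {
	samples, _ = strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
	histograms, _ = strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
	exemplars, _ = strconv.Atoi(resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
	return samples, histograms, exemplars
}
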
// writeV2 is similar to write, but it works with v2 proto message,
// allows partial 4xx writes and gathers statistics.
//
// writeV2 returns the statistics.
// In error cases, writeV2 still returns the statistics, along with the error that
// should be propagated to the remote write sender and the httpCode to use for the response status.
//
// NOTE(bwplotka): TSDB storage is NOT idempotent, so we don't allow "partial retry-able" errors.
// Once we have 5xx type of error, we immediately stop and rollback all appends.
func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ responseStats, errHTTPCode int, _ error) {
app := &timeLimitAppender{
Appender: h.appendable.Appender(ctx), Appender: h.appendable.Appender(ctx),
maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)), maxTime: timestamp.FromTime(time.Now().Add(maxAheadTime)),
} }
defer func() { rs := responseStats{}
samplesWithoutMetadata, errHTTPCode, err := h.appendV2(app, req, &rs)
if err != nil { if err != nil {
_ = timeLimitApp.Rollback() if errHTTPCode/5 == 100 {
return // On 5xx, we always roll back, because we expect
// the sender to retry and TSDB is not idempotent.
if rerr := app.Rollback(); rerr != nil {
level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr)
}
return responseStats{}, errHTTPCode, err
} }
err = timeLimitApp.Commit()
}()
b := labels.NewScratchBuilder(0) // Non-retriable (e.g. bad request error case). Can be partially written.
commitErr := app.Commit()
if commitErr != nil {
// The earlier bad request errors no longer matter, as we now have an internal (retryable) error.
return responseStats{}, http.StatusInternalServerError, commitErr
}
// A bad request error happened, but the rest of the data (if any) was written.
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return rs, errHTTPCode, err
}
// All good just commit.
if err := app.Commit(); err != nil {
return responseStats{}, http.StatusInternalServerError, err
}
h.samplesAppendedWithoutMetadata.Add(float64(samplesWithoutMetadata))
return rs, 0, nil
}
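
For reference, the errHTTPCode/5 == 100 check above relies on integer division: it is true exactly for codes 500 through 504 (500/5 = 100 and 504/5 = 100, but 505/5 = 101). Since appendV2 reports either a 4xx or a 5xx code on error, the check effectively separates retryable internal errors (which trigger a rollback) from bad requests (which allow a partial commit). A tiny equivalent helper, named here only for illustration:

// isRetryable5xx mirrors the errHTTPCode/5 == 100 test: integer division
// makes it true for 500..504 and false for everything else, including 400.
func isRetryable5xx(code int) bool {
	return code/5 == 100
}
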
func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *responseStats) (samplesWithoutMetadata, errHTTPCode int, err error) {
var (
badRequestErrs []error
outOfOrderExemplarErrs, samplesWithInvalidLabels int
b = labels.NewScratchBuilder(0)
)
for _, ts := range req.Timeseries { for _, ts := range req.Timeseries {
ls := ts.ToLabels(&b, req.Symbols) ls := ts.ToLabels(&b, req.Symbols)
// Validate series labels early.
err := h.appendSamplesV2(timeLimitApp, ts.Samples, ls) // NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose
if err != nil { // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case.
return err if !ls.Has(labels.MetricName) || !ls.IsValid() {
badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String()))
samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms)
continue
} }
allSamplesSoFar := rs.samples + rs.histograms
var ref storage.SeriesRef
// Samples.
for _, s := range ts.Samples {
ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
if err == nil {
rs.samples++
continue
}
// Handle append error.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
errors.Is(err, storage.ErrTooOldSample) {
// TODO(bwplotka): Not too spammy log?
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err
}
// Native Histograms.
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err == nil {
rs.histograms++
continue
}
// Handle append error.
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(err, storage.ErrOutOfOrderSample) ||
errors.Is(err, storage.ErrOutOfBounds) ||
errors.Is(err, storage.ErrDuplicateSampleForTimestamp) {
// TODO(bwplotka): Not too spammy log?
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp)
badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String()))
continue
}
return 0, http.StatusInternalServerError, err
}
		// Exemplars.
		for _, ep := range ts.Exemplars {
			e := ep.ToExemplar(&b, req.Symbols)
			ref, err = app.AppendExemplar(ref, ls, e)
			if err == nil {
				rs.exemplars++
				continue
			}
			// Handle append error.
			// TODO(bwplotka): I left the logic as in v1, but we might want to make it consistent with samples and histograms.
			// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
			if errors.Is(err, storage.ErrOutOfOrderExemplar) {
				outOfOrderExemplarErrs++
				level.Debug(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e))
				continue
			}
			level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err)
		}

		m := ts.ToMetadata(req.Symbols)
		if _, err = app.UpdateMetadata(ref, ls, m); err != nil {
			level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err)
			// Metadata is attached to each series, so since Prometheus does not reject samples without metadata information,
			// we don't report a remote-write error either. We increment a metric instead.
			samplesWithoutMetadata += (rs.samples + rs.histograms) - allSamplesSoFar
		}
	}

	if outOfOrderExemplarErrs > 0 {
		level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs)
	}
	h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels))

	if len(badRequestErrs) == 0 {
		return samplesWithoutMetadata, 0, nil
	}
	// TODO(bwplotka): Better concat formatting? Perhaps add size limit?
	return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
}

func (h *writeHandler) appendExemplar(app storage.Appender, e exemplar.Exemplar, labels labels.Labels, outOfOrderExemplarErrs *int) {
	_, err := app.AppendExemplar(0, labels, e)
	err = h.checkAppendExemplarError(err, e, outOfOrderExemplarErrs)
	if err != nil {
		// Since exemplar storage is still experimental, we don't fail the request on ingestion errors.
		level.Debug(h.logger).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", err)
	}
}
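Editorial note (not part of the change): the bad-request body asserted in the tests below is the message of errors.Join over the collected per-series errors, which joins the individual messages with newlines; the single trailing newline in the expected bodies is presumably added when the handler writes the error to the HTTP response. A minimal illustration of the joining behaviour:

package main

import (
	"errors"
	"fmt"
)

func main() {
	errA := errors.New(`out of order sample for series {__name__="m1"}`)
	errB := errors.New(`duplicate sample for timestamp for series {__name__="m2"}`)
	// errors.Join concatenates the messages with a newline between each error.
	fmt.Printf("%q\n", errors.Join(errA, errB).Error())
}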
func (h *writeHandler) appendSamples(app storage.Appender, ss []prompb.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
func (h *writeHandler) appendSamplesV2(app storage.Appender, ss []writev2.Sample, labels labels.Labels) error {
var ref storage.SeriesRef
var err error
for _, s := range ss {
ref, err = app.Append(ref, labels, s.GetTimestamp(), s.GetValue())
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp)
}
return err
}
}
return nil
}
func (h *writeHandler) appendHistograms(app storage.Appender, hh []prompb.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}
func (h *writeHandler) appendHistogramsV2(app storage.Appender, hh []writev2.Histogram, labels labels.Labels) error {
var err error
for _, hp := range hh {
if hp.IsFloatHistogram() {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
_, err = app.AppendHistogram(0, labels, hp.Timestamp, hp.ToIntHistogram(), nil)
}
if err != nil {
unwrappedErr := errors.Unwrap(err)
if unwrappedErr == nil {
unwrappedErr = err
}
// Although AppendHistogram does not currently return ErrDuplicateSampleForTimestamp there is
// a note indicating its inclusion in the future.
if errors.Is(unwrappedErr, storage.ErrOutOfOrderSample) || errors.Is(unwrappedErr, storage.ErrOutOfBounds) || errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp) {
level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp)
}
return err
}
}
return nil
}

// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and

View file

@ -16,6 +16,7 @@ package remote
import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"math"
@ -27,6 +28,7 @@ import (
"time" "time"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/gogo/protobuf/proto"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -290,8 +292,134 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
	}
}
func expectHeaderValue(t testing.TB, expected int, got string) {
t.Helper()
require.NotEmpty(t, got)
i, err := strconv.Atoi(got)
require.NoError(t, err)
require.Equal(t, expected, i)
}
func TestRemoteWriteHandler_V2Message(t *testing.T) {
	// V2 supports partial writes for non-retriable errors, so test them.
for _, tc := range []struct {
desc string
input []writev2.TimeSeries
expectedCode int
expectedRespBody string
commitErr error
appendSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
}{
{
desc: "All timeseries accepted",
input: writeV2RequestFixture.Timeseries,
expectedCode: http.StatusNoContent,
},
{
desc: "Partial write; first series with invalid labels (no metric name)",
input: append(
// Series with test_metric1="test_metric1" labels.
[]writev2.TimeSeries{{LabelsRefs: []uint32{2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
writeV2RequestFixture.Timeseries...),
expectedCode: http.StatusBadRequest,
expectedRespBody: "invalid metric name or labels, got {test_metric1=\"test_metric1\"}\n",
},
{
desc: "Partial write; first series with invalid labels (empty metric name)",
input: append(
// Series with __name__="" labels.
[]writev2.TimeSeries{{LabelsRefs: []uint32{1, 0}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
writeV2RequestFixture.Timeseries...),
expectedCode: http.StatusBadRequest,
expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n",
},
{
desc: "Partial write; first series with one OOO sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, writev2.Sample{Value: 2, Timestamp: 0})
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one dup sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Samples = append(f.Timeseries[0].Samples, f.Timeseries[0].Samples[0])
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one OOO histogram sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil)))
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "out of order sample for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
{
desc: "Partial write; first series with one dup histogram sample",
input: func() []writev2.TimeSeries {
f := proto.Clone(writeV2RequestFixture).(*writev2.Request)
f.Timeseries[0].Histograms = append(f.Timeseries[0].Histograms, f.Timeseries[0].Histograms[1])
return f.Timeseries
}(),
expectedCode: http.StatusBadRequest,
expectedRespBody: "duplicate sample for timestamp for series {__name__=\"test_metric1\", b=\"c\", baz=\"qux\", d=\"e\", foo=\"bar\"}\n",
},
// Non retriable errors from various parts.
{
desc: "Internal sample append error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
appendSampleErr: errors.New("some sample internal append error"),
expectedCode: http.StatusInternalServerError,
expectedRespBody: "some sample internal append error\n",
},
{
desc: "Internal histogram sample append error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
appendHistogramErr: errors.New("some histogram sample internal append error"),
expectedCode: http.StatusInternalServerError,
expectedRespBody: "some histogram sample internal append error\n",
},
{
desc: "Partial write; skipped exemplar; exemplar storage errs are noop",
input: writeV2RequestFixture.Timeseries,
appendExemplarErr: errors.New("some exemplar append error"),
expectedCode: http.StatusNoContent,
},
{
desc: "Partial write; skipped metadata; metadata storage errs are noop",
input: writeV2RequestFixture.Timeseries,
updateMetadataErr: errors.New("some metadata update error"),
expectedCode: http.StatusNoContent,
},
{
desc: "Internal commit error; rollback triggered",
input: writeV2RequestFixture.Timeseries,
commitErr: errors.New("storage error"),
expectedCode: http.StatusInternalServerError,
expectedRespBody: "storage error\n",
},
} {
t.Run(tc.desc, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(log.NewNopLogger(), tc.input, writeV2RequestFixture.Symbols, nil, nil, nil, "snappy")
			require.NoError(t, err)

			req, err := http.NewRequest("", "", bytes.NewReader(payload))
@ -301,40 +429,59 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
req.Header.Set("Content-Encoding", string(SnappyBlockCompression)) req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue) req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{} appendable := &mockAppendable{
commitErr: tc.commitErr,
appendSampleErr: tc.appendSampleErr,
appendHistogramErr: tc.appendHistogramErr,
appendExemplarErr: tc.appendExemplarErr,
updateMetadataErr: tc.updateMetadataErr,
}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}) handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder() recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req) handler.ServeHTTP(recorder, req)
resp := recorder.Result() resp := recorder.Result()
require.Equal(t, http.StatusNoContent, resp.StatusCode) require.Equal(t, tc.expectedCode, resp.StatusCode)
respBody, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, tc.expectedRespBody, string(respBody))
b := labels.NewScratchBuilder(0) if tc.expectedCode == http.StatusInternalServerError {
i := 0 // We don't expect writes for partial writes with retry-able code.
j := 0 expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
k := 0 expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
require.Empty(t, len(appendable.samples))
require.Empty(t, len(appendable.histograms))
require.Empty(t, len(appendable.exemplars))
require.Empty(t, len(appendable.metadata))
return
}
// Double check mandatory 2.0 stats.
// writeV2RequestFixture has 2 series with 1 sample, 2 histograms, 1 exemplar each.
expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Samples"))
expectHeaderValue(t, 4, resp.Header.Get("X-Prometheus-Remote-Write-Written-Histograms"))
if tc.appendExemplarErr != nil {
expectHeaderValue(t, 0, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
} else {
expectHeaderValue(t, 2, resp.Header.Get("X-Prometheus-Remote-Write-Written-Exemplars"))
}
// Double check what was actually appended.
var (
b = labels.NewScratchBuilder(0)
i, j, k, m int
)
for _, ts := range writeV2RequestFixture.Timeseries { for _, ts := range writeV2RequestFixture.Timeseries {
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols) ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
for _, s := range ts.Samples { for _, s := range ts.Samples {
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i]) requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
switch i {
case 0:
requireEqual(t, mockMetadata{ls, writeV2RequestSeries1Metadata}, appendable.metadata[i])
case 1:
requireEqual(t, mockMetadata{ls, writeV2RequestSeries2Metadata}, appendable.metadata[i])
default:
t.Fatal("more series/samples then expected")
}
i++ i++
} }
for _, e := range ts.Exemplars {
exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
j++
}
for _, hp := range ts.Histograms { for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() { if hp.IsFloatHistogram() {
fh := hp.ToFloatHistogram() fh := hp.ToFloatHistogram()
@ -345,9 +492,24 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
					}
					k++
				}
				if tc.appendExemplarErr == nil {
					for _, e := range ts.Exemplars {
						exemplarLabels := e.ToExemplar(&b, writeV2RequestFixture.Symbols).Labels
						requireEqual(t, mockExemplar{ls, exemplarLabels, e.Timestamp, e.Value}, appendable.exemplars[j])
						j++
					}
				}
				if tc.updateMetadataErr == nil {
					expectedMeta := ts.ToMetadata(writeV2RequestFixture.Symbols)
					requireEqual(t, mockMetadata{ls, expectedMeta}, appendable.metadata[m])
					m++
				}
			}
		})
	}
}
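Editorial note (not part of the change): the fixtures above encode label sets the writev2 way, as references into a shared symbols table; LabelsRefs holds alternating name/value indexes and, per the remote write 2.0 spec, symbols[0] is the empty string. That is why LabelsRefs {2, 2} yields test_metric1="test_metric1" and {1, 0} yields __name__="". A small sketch of that decoding with a hypothetical symbols table:

package main

import "fmt"

// decodeLabels resolves writev2-style label references: pairs of (name, value)
// indexes into the symbols table, where symbols[0] is the empty string.
func decodeLabels(symbols []string, refs []uint32) map[string]string {
	out := make(map[string]string, len(refs)/2)
	for i := 0; i+1 < len(refs); i += 2 {
		out[symbols[refs[i]]] = symbols[refs[i+1]]
	}
	return out
}

func main() {
	symbols := []string{"", "__name__", "test_metric1"} // hypothetical; the real fixture uses writeV2RequestFixture.Symbols
	fmt.Println(decodeLabels(symbols, []uint32{2, 2})) // map[test_metric1:test_metric1]
	fmt.Println(decodeLabels(symbols, []uint32{1, 0})) // map[__name__:]
}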
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderSample_V1Message(t *testing.T) {
	for _, tc := range []struct {
		Name string
@ -372,7 +534,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
			req, err := http.NewRequest("", "", bytes.NewReader(payload))
			require.NoError(t, err)

			appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
			handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})

			recorder := httptest.NewRecorder()
@ -384,49 +546,10 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
	}
}
func TestOutOfOrderSample_V2Message(t *testing.T) {
for _, tc := range []struct {
Name string
Timestamp int64
}{
{
Name: "historic",
Timestamp: 0,
},
{
Name: "future",
Timestamp: math.MaxInt64,
},
} {
t.Run(tc.Name, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
LabelsRefs: []uint32{1, 2},
Samples: []writev2.Sample{{Value: 1, Timestamp: tc.Timestamp}},
}}, []string{"", "__name__", "metric1"}, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{latestSample: 100}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
}
// This test case currently aims to verify that the WriteHandler endpoint
// doesn't fail on exemplar ingestion errors, since the exemplar storage is
// still experimental.
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.
func TestOutOfOrderExemplar_V1Message(t *testing.T) {
	tests := []struct {
		Name string
@ -453,7 +576,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
			req, err := http.NewRequest("", "", bytes.NewReader(payload))
			require.NoError(t, err)

			appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
			handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})

			recorder := httptest.NewRecorder()
@ -466,49 +589,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
	}
}
// NOTE: V2 Message is tested in TestRemoteWriteHandler_V2Message.

func TestOutOfOrderExemplar_V2Message(t *testing.T) {
tests := []struct {
Name string
Timestamp int64
}{
{
Name: "historic",
Timestamp: 0,
},
{
Name: "future",
Timestamp: math.MaxInt64,
},
}
for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
LabelsRefs: []uint32{1, 2},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{3, 4}, Value: 1, Timestamp: tc.Timestamp}},
}}, []string{"", "__name__", "metric1", "foo", "bar"}, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{latestExemplar: 100}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
// TODO: update to require.Equal(t, http.StatusConflict, resp.StatusCode) once exemplar storage is not experimental.
require.Equal(t, http.StatusNoContent, resp.StatusCode)
})
}
}
func TestOutOfOrderHistogram_V1Message(t *testing.T) {
	for _, tc := range []struct {
		Name string
@ -533,7 +614,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
			req, err := http.NewRequest("", "", bytes.NewReader(payload))
			require.NoError(t, err)

			appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
			handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})

			recorder := httptest.NewRecorder()
@ -545,46 +626,6 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
	}
}
func TestOutOfOrderHistogram_V2Message(t *testing.T) {
for _, tc := range []struct {
Name string
Timestamp int64
}{
{
Name: "historic",
Timestamp: 0,
},
{
Name: "future",
Timestamp: math.MaxInt64,
},
} {
t.Run(tc.Name, func(t *testing.T) {
payload, _, _, err := buildV2WriteRequest(nil, []writev2.TimeSeries{{
LabelsRefs: []uint32{0, 1},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(0, &testHistogram), writev2.FromFloatHistogram(1, testHistogram.ToFloat(nil))},
}}, []string{"__name__", "metric1"}, nil, nil, nil, "snappy")
require.NoError(t, err)
req, err := http.NewRequest("", "", bytes.NewReader(payload))
require.NoError(t, err)
req.Header.Set("Content-Type", remoteWriteContentTypeHeaders[config.RemoteWriteProtoMsgV2])
req.Header.Set("Content-Encoding", string(SnappyBlockCompression))
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{latestHistogram: 100}
handler := NewWriteHandler(log.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
resp := recorder.Result()
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
})
}
}
func BenchmarkRemoteWriteHandler(b *testing.B) {
	const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
	var reqs []*http.Request
@ -719,15 +760,20 @@ func genSeriesWithSample(numSeries int, ts int64) []prompb.TimeSeries {
}

type mockAppendable struct {
	latestSample    map[uint64]int64
	samples         []mockSample
	latestExemplar  map[uint64]int64
	exemplars       []mockExemplar
	latestHistogram map[uint64]int64
	histograms      []mockHistogram
	metadata        []mockMetadata

	// optional errors to inject.
	commitErr          error
	appendSampleErr    error
	appendHistogramErr error
	appendExemplarErr  error
	updateMetadataErr  error
}
type mockSample struct {
@ -765,48 +811,92 @@ func requireEqual(t *testing.T, expected, actual interface{}, msgAndArgs ...inte
}
func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
if m.latestSample == nil {
m.latestSample = map[uint64]int64{}
}
if m.latestHistogram == nil {
m.latestHistogram = map[uint64]int64{}
}
if m.latestExemplar == nil {
m.latestExemplar = map[uint64]int64{}
}
	return m
}
func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
	if m.appendSampleErr != nil {
		return 0, m.appendSampleErr
	}

	latestTs := m.latestSample[l.Hash()]
	if t < latestTs {
		return 0, storage.ErrOutOfOrderSample
	}
	if t == latestTs {
		return 0, storage.ErrDuplicateSampleForTimestamp
	}

	m.latestSample[l.Hash()] = t
	m.samples = append(m.samples, mockSample{l, t, v})
	return 0, nil
}
func (m *mockAppendable) Commit() error {
	if m.commitErr != nil {
		_ = m.Rollback() // As per Commit method contract.
	}
	return m.commitErr
}

func (m *mockAppendable) Rollback() error {
	m.samples = m.samples[:0]
	m.exemplars = m.exemplars[:0]
	m.histograms = m.histograms[:0]
	m.metadata = m.metadata[:0]
	return nil
}
func (m *mockAppendable) AppendExemplar(_ storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
	if m.appendExemplarErr != nil {
		return 0, m.appendExemplarErr
	}

	latestTs := m.latestExemplar[l.Hash()]
	if e.Ts < latestTs {
		return 0, storage.ErrOutOfOrderExemplar
	}
	if e.Ts == latestTs {
		return 0, storage.ErrDuplicateExemplar
	}

	m.latestExemplar[l.Hash()] = e.Ts
	m.exemplars = append(m.exemplars, mockExemplar{l, e.Labels, e.Ts, e.Value})
	return 0, nil
}
func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
	if m.appendHistogramErr != nil {
		return 0, m.appendHistogramErr
	}

	latestTs := m.latestHistogram[l.Hash()]
	if t < latestTs {
		return 0, storage.ErrOutOfOrderSample
	}
	if t == latestTs {
		return 0, storage.ErrDuplicateSampleForTimestamp
	}

	m.latestHistogram[l.Hash()] = t
	m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
	return 0, nil
}
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
	if m.updateMetadataErr != nil {
		return 0, m.updateMetadataErr
	}

	m.metadata = append(m.metadata, mockMetadata{l: l, m: mp})
	return 0, nil
}

View file

@ -369,7 +369,7 @@ func TestWriteStorageApplyConfig_PartialUpdate(t *testing.T) {
}

func TestOTLPWriteHandler(t *testing.T) {
	exportRequest := generateOTLPWriteRequest()

	buf, err := exportRequest.MarshalProto()
	require.NoError(t, err)
@ -392,7 +392,7 @@ func TestOTLPWriteHandler(t *testing.T) {
	require.Len(t, appendable.exemplars, 1) // 1 (exemplar)
}

func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
	d := pmetric.NewMetrics()

	// Generate One Counter, One Gauge, One Histogram, One Exponential-Histogram
@ -422,6 +422,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest {
	counterDataPoint.Attributes().PutStr("foo.bar", "baz")

	counterExemplar := counterDataPoint.Exemplars().AppendEmpty()
	counterExemplar.SetTimestamp(pcommon.NewTimestampFromTime(timestamp))
	counterExemplar.SetDoubleValue(10.0)
	counterExemplar.SetSpanID(pcommon.SpanID{0, 1, 2, 3, 4, 5, 6, 7})

View file

@ -49,16 +49,16 @@ func newSecondaryQuerierFromChunk(cq ChunkQuerier) genericQuerier {
	return &secondaryQuerier{genericQuerier: newGenericQuerierFromChunk(cq)}
}

func (s *secondaryQuerier) LabelValues(ctx context.Context, name string, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	vals, w, err := s.genericQuerier.LabelValues(ctx, name, hints, matchers...)
	if err != nil {
		return nil, w.Add(err), nil
	}
	return vals, w, nil
}

func (s *secondaryQuerier) LabelNames(ctx context.Context, hints *LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	names, w, err := s.genericQuerier.LabelNames(ctx, hints, matchers...)
	if err != nil {
		return nil, w.Add(err), nil
	}

View file

@ -60,7 +60,7 @@ type XORChunk struct {
	b bstream
}

// NewXORChunk returns a new chunk with XOR encoding.
func NewXORChunk() *XORChunk {
	b := make([]byte, 2, 128)
	return &XORChunk{b: bstream{stream: b, count: 0}}

View file

@ -1001,7 +1001,7 @@ func TestWALFlushedOnDBClose(t *testing.T) {
	q, err := db.Querier(0, 1)
	require.NoError(t, err)

	values, ws, err := q.LabelValues(ctx, "labelname", nil)
	require.NoError(t, err)
	require.Empty(t, ws)
	require.Equal(t, []string{"labelvalue"}, values)
@ -1976,7 +1976,7 @@ func TestQuerierWithBoundaryChunks(t *testing.T) {
	defer q.Close()

	// The requested interval covers 2 blocks, so the querier's label values for blockID should give us 2 values, one from each block.
	b, ws, err := q.LabelValues(ctx, "blockID", nil)
	require.NoError(t, err)
	var nilAnnotations annotations.Annotations
	require.Equal(t, nilAnnotations, ws)
@ -2288,7 +2288,7 @@ func TestDB_LabelNames(t *testing.T) {
		q, err := db.Querier(math.MinInt64, math.MaxInt64)
		require.NoError(t, err)
		var ws annotations.Annotations
		labelNames, ws, err = q.LabelNames(ctx, nil)
		require.NoError(t, err)
		require.Empty(t, ws)
		require.NoError(t, q.Close())

View file

@ -848,10 +848,11 @@ func (a *headAppender) Commit() (err error) {
		inOrderMint         int64 = math.MaxInt64
		inOrderMaxt         int64 = math.MinInt64
		oooMinT             int64 = math.MaxInt64
		oooMaxT             int64 = math.MinInt64
		wblSamples          []record.RefSample
		oooMmapMarkers      map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef
		oooMmapMarkersCount int
		oooRecords          [][]byte
		oooCapMax           = a.head.opts.OutOfOrderCapMax.Load()
		series              *memSeries
@ -872,6 +873,7 @@ func (a *headAppender) Commit() (err error) {
			// WBL is not enabled. So no need to collect.
			wblSamples = nil
			oooMmapMarkers = nil
			oooMmapMarkersCount = 0
			return
		}
		// The m-map happens before adding a new sample. So we collect
@ -880,13 +882,15 @@ func (a *headAppender) Commit() (err error) {
		// WBL Before this Commit(): [old samples before this commit for chunk 1]
		// WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3]
		if oooMmapMarkers != nil {
			markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount)
			for ref, mmapRefs := range oooMmapMarkers {
				for _, mmapRef := range mmapRefs {
					markers = append(markers, record.RefMmapMarker{
						Ref:     ref,
						MmapRef: mmapRef,
					})
				}
			}
			r := enc.MmapMarkers(markers, a.head.getBytesBuffer())
			oooRecords = append(oooRecords, r)
		}
@ -928,32 +932,39 @@ func (a *headAppender) Commit() (err error) {
		case oooSample:
			// Sample is OOO and OOO handling is enabled
			// and the delta is within the OOO tolerance.
			var mmapRefs []chunks.ChunkDiskMapperRef
			ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax)
			if chunkCreated {
				r, ok := oooMmapMarkers[series.ref]
				if !ok || r != nil {
					// !ok means there are no markers collected for these samples yet. So we first flush the samples
					// before setting this m-map marker.

					// r != nil means we have already m-mapped a chunk for this series in the same Commit().
					// Hence, before we m-map again, we should add the samples and m-map markers
					// seen till now to the WBL records.
					collectOOORecords()
				}

				if oooMmapMarkers == nil {
					oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
				}
				if len(mmapRefs) > 0 {
					oooMmapMarkers[series.ref] = mmapRefs
					oooMmapMarkersCount += len(mmapRefs)
				} else {
					// No chunk was written to disk, so we need to set an initial marker for this series.
					oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
					oooMmapMarkersCount++
				}
			}
			if ok {
				wblSamples = append(wblSamples, s)
				if s.T < oooMinT {
					oooMinT = s.T
				}
				if s.T > oooMaxT {
					oooMaxT = s.T
				}
				floatOOOAccepted++
			} else {
@ -1053,7 +1064,7 @@ func (a *headAppender) Commit() (err error) {
	a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
	a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted))
	a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
	a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
	collectOOORecords()

	if a.head.wbl != nil {
@ -1069,14 +1080,14 @@ func (a *headAppender) Commit() (err error) {
}

// insert is like append, except it inserts. Used for OOO samples.
func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) {
	if s.ooo == nil {
		s.ooo = &memSeriesOOOFields{}
	}

	c := s.ooo.oooHeadChunk
	if c == nil || c.chunk.NumSamples() == int(oooCapMax) {
		// Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks.
		c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper)
		chunkCreated = true
	}
@ -1089,7 +1100,7 @@ func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDisk
			c.maxTime = t
		}
	}
	return ok, chunkCreated, mmapRefs
}
// chunkOpts are chunk-level options that are passed when appending to a memSeries.
@ -1431,7 +1442,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange
// cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk.
// The caller must ensure that s.ooo is not nil.
func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) {
	ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper)

	s.ooo.oooHeadChunk = &oooHeadChunk{
@ -1443,21 +1454,29 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk
	return s.ooo.oooHeadChunk, ref
}

func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef {
	if s.ooo == nil || s.ooo.oooHeadChunk == nil {
		// OOO is not enabled or there is no head chunk, so nothing to m-map here.
		return nil
	}
	chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
	if err != nil {
		handleChunkWriteError(err)
		return nil
	}
	chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
	for _, memchunk := range chks {
		chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
		chunkRefs = append(chunkRefs, chunkRef)
		s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
			ref:        chunkRef,
			numSamples: uint16(memchunk.chunk.NumSamples()),
			minTime:    memchunk.minTime,
			maxTime:    memchunk.maxTime,
		})
	}
	s.ooo.oooHeadChunk = nil
	return chunkRefs
}
// mmapChunks will m-map all but first chunk on s.headChunks list.

View file

@ -4730,6 +4730,14 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
// TestWBLReplay checks the replay at a low level.
func TestWBLReplay(t *testing.T) {
	for name, scenario := range sampleTypeScenarios {
		t.Run(name, func(t *testing.T) {
			testWBLReplay(t, scenario)
		})
	}
}

func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
	dir := t.TempDir()
	wal, err := wlog.NewSize(nil, nil, filepath.Join(dir, "wal"), 32768, wlog.CompressionSnappy)
	require.NoError(t, err)
@ -4745,11 +4753,11 @@ func TestWBLReplay(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, h.Init(0))

	var expOOOSamples []chunks.Sample
	l := labels.FromStrings("foo", "bar")
	appendSample := func(mins int64, val float64, isOOO bool) {
		app := h.Appender(context.Background())
		ts, v := mins*time.Minute.Milliseconds(), val
		_, err := app.Append(0, l, ts, v)
		require.NoError(t, err)
		require.NoError(t, app.Commit())
@ -4760,15 +4768,15 @@ func TestWBLReplay(t *testing.T) {
	}

	// In-order sample.
	appendSample(60, 60, false)

	// Out of order samples.
	appendSample(40, 40, true)
	appendSample(35, 35, true)
	appendSample(50, 50, true)
	appendSample(55, 55, true)
	appendSample(59, 59, true)
	appendSample(31, 31, true)

	// Check that Head's time ranges are set properly.
	require.Equal(t, 60*time.Minute.Milliseconds(), h.MinTime())
@ -4792,22 +4800,23 @@ func TestWBLReplay(t *testing.T) {
	require.False(t, ok)
	require.NotNil(t, ms)

	chks, err := ms.ooo.oooHeadChunk.chunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
	require.NoError(t, err)
	require.Len(t, chks, 1)

	it := chks[0].chunk.Iterator(nil)
	actOOOSamples, err := storage.ExpandSamples(it, nil)
	require.NoError(t, err)

	// OOO chunk will be sorted. Hence sort the expected samples.
	sort.Slice(expOOOSamples, func(i, j int) bool {
		return expOOOSamples[i].T() < expOOOSamples[j].T()
	})

	// Passing in true for the 'ignoreCounterResets' parameter prevents differences in counter reset headers
	// from being factored in to the sample comparison.
	// TODO(fionaliao): understand counter reset behaviour, might want to modify this later.
	requireEqualSamples(t, l.String(), expOOOSamples, actOOOSamples, true)

	require.NoError(t, h.Close())
}

View file

@ -17,9 +17,10 @@ import (
"fmt" "fmt"
"sort" "sort"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/oklog/ulid" "github.com/oklog/ulid"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tombstones" "github.com/prometheus/prometheus/tsdb/tombstones"
) )
@ -74,24 +75,22 @@ func (o *OOOChunk) NumSamples() int {
	return len(o.samples)
}

// ToEncodedChunks returns chunks with the samples in the OOOChunk.
//
//nolint:revive // unexported-return.
func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error) {
	if len(o.samples) == 0 {
		return nil, nil
	}
	// The most common case is that there will be a single chunk, with the same type of samples in it - this is always true for float samples.
	chks = make([]memChunk, 0, 1)
	var (
		cmint int64
		cmaxt int64
		chunk chunkenc.Chunk
		app   chunkenc.Appender
	)
	prevEncoding := chunkenc.EncNone // Yes we could call the chunk for this, but this is more efficient.
	for _, s := range o.samples {
		if s.t < mint {
			continue
@ -99,9 +98,77 @@ func (o *OOOChunk) ToXORBetweenTimestamps(mint, maxt int64) (*chunkenc.XORChunk,
		if s.t > maxt {
			break
		}
		encoding := chunkenc.EncXOR
		if s.h != nil {
			encoding = chunkenc.EncHistogram
		} else if s.fh != nil {
			encoding = chunkenc.EncFloatHistogram
		}
// prevApp is the appender for the previous sample.
prevApp := app
if encoding != prevEncoding { // For the first sample, this will always be true as EncNone != EncXOR | EncHistogram | EncFloatHistogram
if prevEncoding != chunkenc.EncNone {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
}
cmint = s.t
switch encoding {
case chunkenc.EncXOR:
chunk = chunkenc.NewXORChunk()
case chunkenc.EncHistogram:
chunk = chunkenc.NewHistogramChunk()
case chunkenc.EncFloatHistogram:
chunk = chunkenc.NewFloatHistogramChunk()
default:
chunk = chunkenc.NewXORChunk()
}
app, err = chunk.Appender()
if err != nil {
return
}
}
switch encoding {
case chunkenc.EncXOR:
app.Append(s.t, s.f)
case chunkenc.EncHistogram:
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevHApp, _ := prevApp.(*chunkenc.HistogramAppender)
var (
newChunk chunkenc.Chunk
recoded bool
)
newChunk, recoded, app, _ = app.AppendHistogram(prevHApp, s.t, s.h, false)
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
}
chunk = newChunk
cmint = s.t
}
case chunkenc.EncFloatHistogram:
// Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway.
prevHApp, _ := prevApp.(*chunkenc.FloatHistogramAppender)
var (
newChunk chunkenc.Chunk
recoded bool
)
newChunk, recoded, app, _ = app.AppendFloatHistogram(prevHApp, s.t, s.fh, false)
if newChunk != nil { // A new chunk was allocated.
if !recoded {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
}
chunk = newChunk
cmint = s.t
}
}
cmaxt = s.t
prevEncoding = encoding
}
if prevEncoding != chunkenc.EncNone {
chks = append(chks, memChunk{chunk, cmint, cmaxt, nil})
}
return chks, nil
}
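Editorial note (not part of the change): ToEncodedChunks cuts a new chunk whenever the sample encoding changes (float, integer histogram, float histogram), so a mixed out-of-order stream becomes several encoding-homogeneous chunks. A stripped-down sketch of that run-splitting idea, independent of chunkenc:

package main

import "fmt"

type kind int

const (
	kindFloat kind = iota
	kindHistogram
	kindFloatHistogram
)

type sample struct {
	t int64
	k kind
}

// splitRuns groups consecutive samples of the same kind, mirroring how
// ToEncodedChunks starts a new chunk when the encoding changes.
func splitRuns(samples []sample) [][]sample {
	var runs [][]sample
	for i, s := range samples {
		if i == 0 || s.k != samples[i-1].k {
			runs = append(runs, nil)
		}
		runs[len(runs)-1] = append(runs[len(runs)-1], s)
	}
	return runs
}

func main() {
	in := []sample{{1, kindFloat}, {2, kindFloat}, {3, kindHistogram}, {4, kindFloat}}
	fmt.Println(len(splitRuns(in))) // 3 runs -> 3 chunks
}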
var _ BlockReader = &OOORangeHead{}

View file

@ -108,11 +108,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra
		c := s.ooo.oooHeadChunk
		if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 {
			ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks))))
			if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least.
				chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime)
				if err != nil {
					handleChunkWriteError(err)
					return nil
				}
				for _, chk := range chks {
					addChunk(c.minTime, c.maxTime, ref, chk.chunk)
				}
			} else {
				var emptyChunk chunkenc.Chunk
				addChunk(c.minTime, c.maxTime, ref, emptyChunk)
			}
		}
	}
	for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- {
@ -341,14 +349,20 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead,
			continue
		}

		var lastMmapRef chunks.ChunkDiskMapperRef
		mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper)
		if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 {
			// Nothing was m-mapped. So take the mmapRef from the existing slice if it exists.
			mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref}
		}
		if len(mmapRefs) == 0 {
			lastMmapRef = 0
		} else {
			lastMmapRef = mmapRefs[len(mmapRefs)-1]
		}
		seq, off := lastMmapRef.Unpack()
		if seq > lastSeq || (seq == lastSeq && off > lastOff) {
			ch.lastMmapRef, lastSeq, lastOff = lastMmapRef, seq, off
		}
		if len(ms.ooo.oooMmappedChunks) > 0 {
			ch.postings = append(ch.postings, seriesRef)

View file

@ -77,12 +77,12 @@ func newBlockBaseQuerier(b BlockReader, mint, maxt int64) (*blockBaseQuerier, er
	}, nil
}

func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	res, err := q.index.SortedLabelValues(ctx, name, matchers...)
	return res, nil, err
}

func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
	res, err := q.index.LabelNames(ctx, matchers...)
	return res, nil, err
}

View file

@ -3022,7 +3022,7 @@ func TestQuerierIndexQueriesRace(t *testing.T) {
			q, err := db.Querier(math.MinInt64, math.MaxInt64)
			require.NoError(t, err)

			values, _, err := q.LabelValues(ctx, "seq", nil, c.matchers...)
			require.NoError(t, err)
			require.Emptyf(t, values, `label values for label "seq" should be empty`)

View file

@ -543,7 +543,7 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr
	return histograms, nil
}

// DecodeFloatHistogram decodes a FloatHistogram from a byte slice.
func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
	fh.CounterResetHint = histogram.CounterResetHint(buf.Byte())

View file

@ -265,6 +265,11 @@ func (w *Watcher) loop() {
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
	_, lastSegment, err := w.firstAndLast()
	if err != nil {
		return fmt.Errorf("wal.Segments: %w", err)
	}

	// We want to ensure this is false across iterations since
	// Run will be called again if there was a failure to read the WAL.
	w.sendSamples = false
@ -289,20 +294,14 @@ func (w *Watcher) Run() error {
return err return err
} }
level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment) level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
for !isClosed(w.quit) { for !isClosed(w.quit) {
w.currentSegmentMetric.Set(float64(currentSegment)) w.currentSegmentMetric.Set(float64(currentSegment))
// Re-check on each iteration in case a new segment was added, // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
// because watch() will wait for notifications on the last segment. // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
_, lastSegment, err := w.firstAndLast() level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)
if err != nil { if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) {
return fmt.Errorf("wal.Segments: %w", err)
}
tail := currentSegment >= lastSegment
level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment, "lastSegment", lastSegment)
if err := w.watch(currentSegment, tail); err != nil && !errors.Is(err, ErrIgnorable) {
return err return err
} }
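
The restructured Run above looks up the last WAL segment once, via firstAndLast(), before entering the loop instead of re-listing segments on every iteration; each pass then only decides whether currentSegment has reached lastSegment and should therefore be tailed. A rough standalone sketch of that control flow, assuming the segment counter advances by one per iteration; firstAndLast and watch are passed in as hypothetical function values.

package main

import "fmt"

// run models the new loop shape: the last segment is resolved once up front,
// and each iteration tails only when the current segment is at or past it.
func run(firstAndLast func() (first, last int, err error), watch func(segment int, tail bool) error, start, maxSegment int) error {
	_, lastSegment, err := firstAndLast()
	if err != nil {
		return fmt.Errorf("wal.Segments: %w", err)
	}
	for currentSegment := start; currentSegment <= maxSegment; currentSegment++ {
		tail := currentSegment >= lastSegment
		if err := watch(currentSegment, tail); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := run(
		func() (int, int, error) { return 0, 3, nil },
		func(segment int, tail bool) error {
			fmt.Println("segment", segment, "tail:", tail)
			return nil
		},
		0, 4,
	)
	fmt.Println("err:", err)
}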

View file

@ -17,6 +17,7 @@ import (
"math/rand" "math/rand"
"os" "os"
"path" "path"
"runtime"
"sync" "sync"
"testing" "testing"
"time" "time"
@ -700,11 +701,46 @@ func TestRun_StartupTime(t *testing.T) {
} }
} }
func generateWALRecords(w *WL, segment, seriesCount, samplesCount int) error {
enc := record.Encoder{}
for j := 0; j < seriesCount; j++ {
ref := j + (segment * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", segment)),
},
}, nil)
if err := w.Log(series); err != nil {
return err
}
for k := 0; k < samplesCount; k++ {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(segment),
V: float64(segment),
},
}, nil)
if err := w.Log(sample); err != nil {
return err
}
}
}
return nil
}
func TestRun_AvoidNotifyWhenBehind(t *testing.T) { func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
const pageSize = 32 * 1024 if runtime.GOOS == "windows" { // Takes a really long time, perhaps because min sleep time is 15ms.
const segments = 10 t.SkipNow()
const seriesCount = 20 }
const samplesCount = 300 const segmentSize = pageSize // Smallest allowed segment size.
const segmentsToWrite = 5
const segmentsToRead = segmentsToWrite - 1
const seriesCount = 10
const samplesCount = 50
// This test can take longer than intended to finish in cloud CI. // This test can take longer than intended to finish in cloud CI.
readTimeout := 10 * time.Second readTimeout := 10 * time.Second
@ -717,73 +753,37 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
err := os.Mkdir(wdir, 0o777) err := os.Mkdir(wdir, 0o777)
require.NoError(t, err) require.NoError(t, err)
enc := record.Encoder{} w, err := NewSize(nil, nil, wdir, segmentSize, compress)
w, err := NewSize(nil, nil, wdir, pageSize, compress)
require.NoError(t, err) require.NoError(t, err)
var wg sync.WaitGroup var wg sync.WaitGroup
// add one segment initially to ensure there's a value > 0 for the last segment id // Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk.
for i := 0; i < 1; i++ { require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount))
for j := 0; j < seriesCount; j++ { w.NextSegment() // Force creation of the next segment
ref := j + (i * 100)
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for k := 0; k < samplesCount; k++ {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
}
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()
for i := 1; i < segments; i++ { for i := 1; i < segmentsToWrite; i++ {
for j := 0; j < seriesCount; j++ { require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
ref := j + (i * 100) w.NextSegment()
series := enc.Series([]record.RefSeries{
{
Ref: chunks.HeadSeriesRef(ref),
Labels: labels.FromStrings("__name__", fmt.Sprintf("metric_%d", i)),
},
}, nil)
require.NoError(t, w.Log(series))
for k := 0; k < samplesCount; k++ {
inner := rand.Intn(ref + 1)
sample := enc.Samples([]record.RefSample{
{
Ref: chunks.HeadSeriesRef(inner),
T: int64(i),
V: float64(i),
},
}, nil)
require.NoError(t, w.Log(sample))
}
}
} }
}() }()
wt := newWriteToMock(time.Millisecond) wt := newWriteToMock(time.Millisecond)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false) watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = segments watcher.MaxSegment = segmentsToRead
watcher.setMetrics() watcher.setMetrics()
startTime := time.Now() startTime := time.Now()
err = watcher.Run() err = watcher.Run()
wg.Wait() wg.Wait()
require.Less(t, time.Since(startTime), readTimeout) require.Less(t, time.Since(startTime), readTimeout)
// But sample records shouldn't get dropped
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() > 0
})
require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
require.NoError(t, err) require.NoError(t, err)
require.NoError(t, w.Close()) require.NoError(t, w.Close())
}) })

View file

@ -75,7 +75,7 @@ func createFlagRow(flag *kingpin.FlagModel) []string {
name = fmt.Sprintf(`<code class="text-nowrap">-%c</code>, <code class="text-nowrap">--%s</code>`, flag.Short, flag.Name) name = fmt.Sprintf(`<code class="text-nowrap">-%c</code>, <code class="text-nowrap">--%s</code>`, flag.Short, flag.Name)
} }
return []string{name, flag.Help, defaultVal} return []string{name, strings.ReplaceAll(flag.Help, "|", `\|`), defaultVal}
} }
func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error { func writeFlagTable(writer io.Writer, level int, fgm *kingpin.FlagGroupModel) error {
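
The change above escapes any literal | in a flag's help text so it cannot terminate a cell when the help is rendered into a Markdown table row. A tiny illustration; the --output flag and its help string are made up.

package main

import (
	"fmt"
	"strings"
)

func main() {
	help := "Output format. One of: [json|yaml]" // hypothetical help text containing a pipe
	escaped := strings.ReplaceAll(help, "|", `\|`)
	fmt.Printf("| --output | %s |\n", escaped)
	// Prints: | --output | Output format. One of: [json\|yaml] |
}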

View file

@ -660,6 +660,10 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
} }
hints := &storage.LabelHints{
Limit: toHintLimit(limit),
}
q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil { if err != nil {
return apiFuncResult{nil, returnAPIError(err), nil, nil} return apiFuncResult{nil, returnAPIError(err), nil, nil}
@ -674,7 +678,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
labelNamesSet := make(map[string]struct{}) labelNamesSet := make(map[string]struct{})
for _, matchers := range matcherSets { for _, matchers := range matcherSets {
vals, callWarnings, err := q.LabelNames(r.Context(), matchers...) vals, callWarnings, err := q.LabelNames(r.Context(), hints, matchers...)
if err != nil { if err != nil {
return apiFuncResult{nil, returnAPIError(err), warnings, nil} return apiFuncResult{nil, returnAPIError(err), warnings, nil}
} }
@ -696,7 +700,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
if len(matcherSets) == 1 { if len(matcherSets) == 1 {
matchers = matcherSets[0] matchers = matcherSets[0]
} }
names, warnings, err = q.LabelNames(r.Context(), matchers...) names, warnings, err = q.LabelNames(r.Context(), hints, matchers...)
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil}
} }
@ -706,7 +710,7 @@ func (api *API) labelNames(r *http.Request) apiFuncResult {
names = []string{} names = []string{}
} }
if len(names) > limit { if limit > 0 && len(names) > limit {
names = names[:limit] names = names[:limit]
warnings = warnings.Add(errors.New("results truncated due to limit")) warnings = warnings.Add(errors.New("results truncated due to limit"))
} }
@ -740,6 +744,10 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
} }
hints := &storage.LabelHints{
Limit: toHintLimit(limit),
}
q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end)) q, err := api.Queryable.Querier(timestamp.FromTime(start), timestamp.FromTime(end))
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil} return apiFuncResult{nil, &apiError{errorExec, err}, nil, nil}
@ -764,7 +772,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
var callWarnings annotations.Annotations var callWarnings annotations.Annotations
labelValuesSet := make(map[string]struct{}) labelValuesSet := make(map[string]struct{})
for _, matchers := range matcherSets { for _, matchers := range matcherSets {
vals, callWarnings, err = q.LabelValues(ctx, name, matchers...) vals, callWarnings, err = q.LabelValues(ctx, name, hints, matchers...)
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
} }
@ -783,7 +791,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
if len(matcherSets) == 1 { if len(matcherSets) == 1 {
matchers = matcherSets[0] matchers = matcherSets[0]
} }
vals, warnings, err = q.LabelValues(ctx, name, matchers...) vals, warnings, err = q.LabelValues(ctx, name, hints, matchers...)
if err != nil { if err != nil {
return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer}
} }
@ -795,7 +803,7 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
slices.Sort(vals) slices.Sort(vals)
if len(vals) > limit { if limit > 0 && len(vals) > limit {
vals = vals[:limit] vals = vals[:limit]
warnings = warnings.Add(errors.New("results truncated due to limit")) warnings = warnings.Add(errors.New("results truncated due to limit"))
} }
@ -865,6 +873,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
Start: timestamp.FromTime(start), Start: timestamp.FromTime(start),
End: timestamp.FromTime(end), End: timestamp.FromTime(end),
Func: "series", // There is no series function, this token is used for lookups that don't need samples. Func: "series", // There is no series function, this token is used for lookups that don't need samples.
Limit: toHintLimit(limit),
} }
var set storage.SeriesSet var set storage.SeriesSet
@ -891,7 +900,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
} }
metrics = append(metrics, set.At().Labels()) metrics = append(metrics, set.At().Labels())
if len(metrics) > limit { if limit > 0 && len(metrics) > limit {
metrics = metrics[:limit] metrics = metrics[:limit]
warnings.Add(errors.New("results truncated due to limit")) warnings.Add(errors.New("results truncated due to limit"))
return apiFuncResult{metrics, nil, warnings, closer} return apiFuncResult{metrics, nil, warnings, closer}
@ -1397,6 +1406,11 @@ func (api *API) rules(r *http.Request) apiFuncResult {
rgSet := queryFormToSet(r.Form["rule_group[]"]) rgSet := queryFormToSet(r.Form["rule_group[]"])
fSet := queryFormToSet(r.Form["file[]"]) fSet := queryFormToSet(r.Form["file[]"])
matcherSets, err := parseMatchersParam(r.Form["match[]"])
if err != nil {
return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil}
}
ruleGroups := api.rulesRetriever(r.Context()).RuleGroups() ruleGroups := api.rulesRetriever(r.Context()).RuleGroups()
res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))} res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, 0, len(ruleGroups))}
typ := strings.ToLower(r.URL.Query().Get("type")) typ := strings.ToLower(r.URL.Query().Get("type"))
@ -1436,7 +1450,8 @@ func (api *API) rules(r *http.Request) apiFuncResult {
EvaluationTime: grp.GetEvaluationTime().Seconds(), EvaluationTime: grp.GetEvaluationTime().Seconds(),
LastEvaluation: grp.GetLastEvaluation(), LastEvaluation: grp.GetLastEvaluation(),
} }
for _, rr := range grp.Rules() {
for _, rr := range grp.Rules(matcherSets...) {
var enrichedRule Rule var enrichedRule Rule
if len(rnSet) > 0 { if len(rnSet) > 0 {
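
The rules endpoint above now parses match[] label selectors and passes them to grp.Rules(matcherSets...), so only rules whose labels satisfy at least one selector set are returned (the OR semantics exercised by the new test cases later in this commit). A hypothetical sketch of such a filter; matchesAny and the equality-only Matcher are illustrative stand-ins, not the real rules or labels API.

package main

import "fmt"

type Matcher struct{ Name, Value string }

// matchesAny reports whether the rule's labels satisfy at least one selector set;
// within a set every matcher must match (only equality matching in this sketch).
func matchesAny(ruleLabels map[string]string, matcherSets [][]Matcher) bool {
	if len(matcherSets) == 0 {
		return true // no match[] parameters: keep every rule
	}
	for _, set := range matcherSets {
		ok := true
		for _, m := range set {
			if ruleLabels[m.Name] != m.Value {
				ok = false
				break
			}
		}
		if ok {
			return true
		}
	}
	return false
}

func main() {
	rule := map[string]string{"testlabel": "rule"}
	sets := [][]Matcher{
		{{Name: "testlabel", Value: "abc"}},
		{{Name: "testlabel", Value: "rule"}},
	}
	fmt.Println(matchesAny(rule, sets)) // true: the second selector set matches
}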
@ -1902,8 +1917,8 @@ OUTER:
return matcherSets, nil return matcherSets, nil
} }
// parseLimitParam returning 0 means no limit is to be applied.
func parseLimitParam(limitStr string) (limit int, err error) { func parseLimitParam(limitStr string) (limit int, err error) {
limit = math.MaxInt
if limitStr == "" { if limitStr == "" {
return limit, nil return limit, nil
} }
@ -1912,9 +1927,19 @@ func parseLimitParam(limitStr string) (limit int, err error) {
if err != nil { if err != nil {
return limit, err return limit, err
} }
if limit <= 0 { if limit < 0 {
return limit, errors.New("limit must be positive") return limit, errors.New("limit must be non-negative")
} }
return limit, nil return limit, nil
} }
// toHintLimit increases the API limit, as returned by parseLimitParam, by 1.
// This allows for emitting warnings when the results are truncated.
func toHintLimit(limit int) int {
// 0 means no limit; the check also avoids int overflow
if limit > 0 && limit < math.MaxInt {
return limit + 1
}
return limit
}
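
parseLimitParam above now returns 0, meaning "no limit", instead of defaulting to math.MaxInt, and toHintLimit asks the storage layer for one extra result so the API can detect that a result set was cut off and attach a truncation warning. A compact sketch of the pattern; toHintLimit and the truncation check mirror the diff, while the label names returned by the pretend querier are made up.

package main

import (
	"fmt"
	"math"
)

// toHintLimit bumps a positive limit by one so that receiving more than
// `limit` results signals truncation; 0 stays 0 and means "no limit".
func toHintLimit(limit int) int {
	if limit > 0 && limit < math.MaxInt {
		return limit + 1
	}
	return limit
}

func main() {
	limit := 2
	hintLimit := toHintLimit(limit)
	fmt.Println("hint limit passed to storage:", hintLimit) // 3

	// Pretend the querier honoured the hint and returned three label names.
	names := []string{"__name__", "abc", "foo"}
	if limit > 0 && len(names) > limit {
		names = names[:limit]
		fmt.Println("warning: results truncated due to limit")
	}
	fmt.Println(names) // [__name__ abc]
}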

View file

@ -261,11 +261,36 @@ func (m *rulesRetrieverMock) CreateAlertingRules() {
false, false,
log.NewNopLogger(), log.NewNopLogger(),
) )
rule4 := rules.NewAlertingRule(
"test_metric6",
expr2,
time.Second,
0,
labels.FromStrings("testlabel", "rule"),
labels.Labels{},
labels.Labels{},
"",
true,
log.NewNopLogger(),
)
rule5 := rules.NewAlertingRule(
"test_metric7",
expr2,
time.Second,
0,
labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
labels.Labels{},
labels.Labels{},
"",
true,
log.NewNopLogger(),
)
var r []*rules.AlertingRule var r []*rules.AlertingRule
r = append(r, rule1) r = append(r, rule1)
r = append(r, rule2) r = append(r, rule2)
r = append(r, rule3) r = append(r, rule3)
r = append(r, rule4)
r = append(r, rule5)
m.alertingRules = r m.alertingRules = r
} }
@ -300,7 +325,9 @@ func (m *rulesRetrieverMock) CreateRuleGroups() {
recordingExpr, err := parser.ParseExpr(`vector(1)`) recordingExpr, err := parser.ParseExpr(`vector(1)`)
require.NoError(m.testing, err, "unable to parse alert expression") require.NoError(m.testing, err, "unable to parse alert expression")
recordingRule := rules.NewRecordingRule("recording-rule-1", recordingExpr, labels.Labels{}) recordingRule := rules.NewRecordingRule("recording-rule-1", recordingExpr, labels.Labels{})
recordingRule2 := rules.NewRecordingRule("recording-rule-2", recordingExpr, labels.FromStrings("testlabel", "rule"))
r = append(r, recordingRule) r = append(r, recordingRule)
r = append(r, recordingRule2)
group := rules.NewGroup(rules.GroupOptions{ group := rules.NewGroup(rules.GroupOptions{
Name: "grp", Name: "grp",
@ -739,13 +766,16 @@ func TestLabelNames(t *testing.T) {
api := &API{ api := &API{
Queryable: storage, Queryable: storage,
} }
request := func(method string, matchers ...string) (*http.Request, error) { request := func(method, limit string, matchers ...string) (*http.Request, error) {
u, err := url.Parse("http://example.com") u, err := url.Parse("http://example.com")
require.NoError(t, err) require.NoError(t, err)
q := u.Query() q := u.Query()
for _, matcher := range matchers { for _, matcher := range matchers {
q.Add("match[]", matcher) q.Add("match[]", matcher)
} }
if limit != "" {
q.Add("limit", limit)
}
u.RawQuery = q.Encode() u.RawQuery = q.Encode()
r, err := http.NewRequest(method, u.String(), nil) r, err := http.NewRequest(method, u.String(), nil)
@ -759,6 +789,7 @@ func TestLabelNames(t *testing.T) {
name string name string
api *API api *API
matchers []string matchers []string
limit string
expected []string expected []string
expectedErrorType errorType expectedErrorType errorType
}{ }{
@ -773,6 +804,13 @@ func TestLabelNames(t *testing.T) {
expected: []string{"__name__", "abc", "foo", "xyz"}, expected: []string{"__name__", "abc", "foo", "xyz"},
api: api, api: api,
}, },
{
name: "non empty label matcher with limit",
matchers: []string{`{foo=~".+"}`},
expected: []string{"__name__", "abc"},
limit: "2",
api: api,
},
{ {
name: "exact label matcher", name: "exact label matcher",
matchers: []string{`{foo="boo"}`}, matchers: []string{`{foo="boo"}`},
@ -805,7 +843,7 @@ func TestLabelNames(t *testing.T) {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
for _, method := range []string{http.MethodGet, http.MethodPost} { for _, method := range []string{http.MethodGet, http.MethodPost} {
ctx := context.Background() ctx := context.Background()
req, err := request(method, tc.matchers...) req, err := request(method, tc.limit, tc.matchers...)
require.NoError(t, err) require.NoError(t, err)
res := tc.api.labelNames(req.WithContext(ctx)) res := tc.api.labelNames(req.WithContext(ctx))
assertAPIError(t, res.err, tc.expectedErrorType) assertAPIError(t, res.err, tc.expectedErrorType)
@ -1430,6 +1468,15 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
responseLen: 2, // API does not specify which particular value will come back. responseLen: 2, // API does not specify which particular value will come back.
warningsCount: 0, // No warnings if limit isn't exceeded. warningsCount: 0, // No warnings if limit isn't exceeded.
}, },
{
endpoint: api.series,
query: url.Values{
"match[]": []string{"test_metric1"},
"limit": []string{"0"},
},
responseLen: 2, // API does not specify which particular value will come back.
warningsCount: 0, // No warnings if limit isn't exceeded.
},
// Missing match[] query params in series requests. // Missing match[] query params in series requests.
{ {
endpoint: api.series, endpoint: api.series,
@ -2151,6 +2198,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok", Health: "ok",
Type: "alerting", Type: "alerting",
}, },
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
RecordingRule{ RecordingRule{
Name: "recording-rule-1", Name: "recording-rule-1",
Query: "vector(1)", Query: "vector(1)",
@ -2158,6 +2227,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok", Health: "ok",
Type: "recording", Type: "recording",
}, },
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
}, },
}, },
}, },
@ -2210,6 +2286,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok", Health: "ok",
Type: "alerting", Type: "alerting",
}, },
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: nil,
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: nil,
Health: "ok",
Type: "alerting",
},
RecordingRule{ RecordingRule{
Name: "recording-rule-1", Name: "recording-rule-1",
Query: "vector(1)", Query: "vector(1)",
@ -2217,6 +2315,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok", Health: "ok",
Type: "recording", Type: "recording",
}, },
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
}, },
}, },
}, },
@ -2276,6 +2381,28 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok", Health: "ok",
Type: "alerting", Type: "alerting",
}, },
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
}, },
}, },
}, },
@ -2302,6 +2429,13 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
Health: "ok", Health: "ok",
Type: "recording", Type: "recording",
}, },
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
}, },
}, },
}, },
@ -2369,6 +2503,179 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
}, },
zeroFunc: rulesZeroFunc, zeroFunc: rulesZeroFunc,
}, },
{
endpoint: api.rules,
query: url.Values{
"match[]": []string{`{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"type": []string{"alert"},
"match[]": []string{`{templatedlabel="{{ $externalURL }}"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric7",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("templatedlabel", "{{ $externalURL }}"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"match[]": []string{`{testlabel="abc"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{},
},
},
// This tests the OR condition: the API response should return a rule if it matches at least one of the label selectors
{
endpoint: api.rules,
query: url.Values{
"match[]": []string{`{testlabel="abc"}`, `{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"type": []string{"record"},
"match[]": []string{`{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
RecordingRule{
Name: "recording-rule-2",
Query: "vector(1)",
Labels: labels.FromStrings("testlabel", "rule"),
Health: "ok",
Type: "recording",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{
endpoint: api.rules,
query: url.Values{
"type": []string{"alert"},
"match[]": []string{`{testlabel="rule"}`},
},
response: &RuleDiscovery{
RuleGroups: []*RuleGroup{
{
Name: "grp",
File: "/path/to/file",
Interval: 1,
Limit: 0,
Rules: []Rule{
AlertingRule{
State: "inactive",
Name: "test_metric6",
Query: "up == 1",
Duration: 1,
Labels: labels.FromStrings("testlabel", "rule"),
Annotations: labels.Labels{},
Alerts: []*Alert{},
Health: "ok",
Type: "alerting",
},
},
},
},
},
zeroFunc: rulesZeroFunc,
},
{ {
endpoint: api.queryExemplars, endpoint: api.queryExemplars,
query: url.Values{ query: url.Values{

View file

@ -171,11 +171,11 @@ type errorTestQuerier struct {
err error err error
} }
func (t errorTestQuerier) LabelValues(context.Context, string, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (t errorTestQuerier) LabelValues(context.Context, string, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err return nil, nil, t.err
} }
func (t errorTestQuerier) LabelNames(context.Context, ...*labels.Matcher) ([]string, annotations.Annotations, error) { func (t errorTestQuerier) LabelNames(context.Context, *storage.LabelHints, ...*labels.Matcher) ([]string, annotations.Annotations, error) {
return nil, nil, t.err return nil, nil, t.err
} }

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.53.0", "version": "0.53.1",
"description": "a CodeMirror mode for the PromQL language", "description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts", "types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js", "module": "dist/esm/index.js",
@ -29,15 +29,15 @@
}, },
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md", "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.53.0", "@prometheus-io/lezer-promql": "0.53.1",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {
"@codemirror/autocomplete": "^6.16.2", "@codemirror/autocomplete": "^6.17.0",
"@codemirror/language": "^6.10.2", "@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0", "@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3", "@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.22.1", "@codemirror/view": "^6.28.3",
"@lezer/common": "^1.2.1", "@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0", "@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1", "@lezer/lr": "^1.4.1",

View file

@ -1,6 +1,6 @@
{ {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.53.0", "version": "0.53.1",
"description": "lezer-based PromQL grammar", "description": "lezer-based PromQL grammar",
"main": "dist/index.cjs", "main": "dist/index.cjs",
"type": "module", "type": "module",

View file

@ -1,19 +1,19 @@
{ {
"name": "prometheus-io", "name": "prometheus-io",
"version": "0.53.0", "version": "0.53.1",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "prometheus-io", "name": "prometheus-io",
"version": "0.53.0", "version": "0.53.1",
"workspaces": [ "workspaces": [
"react-app", "react-app",
"module/*" "module/*"
], ],
"devDependencies": { "devDependencies": {
"@types/jest": "^29.5.12", "@types/jest": "^29.5.12",
"@types/node": "^20.14.2", "@types/node": "^20.14.9",
"eslint-config-prettier": "^9.1.0", "eslint-config-prettier": "^9.1.0",
"eslint-config-react-app": "^7.0.1", "eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^4.2.1", "eslint-plugin-prettier": "^4.2.1",
@ -21,7 +21,7 @@
"jest-fetch-mock": "^3.0.3", "jest-fetch-mock": "^3.0.3",
"prettier": "^2.8.8", "prettier": "^2.8.8",
"react-scripts": "^5.0.1", "react-scripts": "^5.0.1",
"ts-jest": "^29.1.4", "ts-jest": "^29.2.2",
"typescript": "^4.9.5" "typescript": "^4.9.5"
}, },
"engines": { "engines": {
@ -30,18 +30,18 @@
}, },
"module/codemirror-promql": { "module/codemirror-promql": {
"name": "@prometheus-io/codemirror-promql", "name": "@prometheus-io/codemirror-promql",
"version": "0.53.0", "version": "0.53.1",
"license": "Apache-2.0", "license": "Apache-2.0",
"dependencies": { "dependencies": {
"@prometheus-io/lezer-promql": "0.53.0", "@prometheus-io/lezer-promql": "0.53.1",
"lru-cache": "^7.18.3" "lru-cache": "^7.18.3"
}, },
"devDependencies": { "devDependencies": {
"@codemirror/autocomplete": "^6.16.2", "@codemirror/autocomplete": "^6.17.0",
"@codemirror/language": "^6.10.2", "@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0", "@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.3.3", "@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.22.1", "@codemirror/view": "^6.28.3",
"@lezer/common": "^1.2.1", "@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0", "@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.1", "@lezer/lr": "^1.4.1",
@ -69,7 +69,7 @@
}, },
"module/lezer-promql": { "module/lezer-promql": {
"name": "@prometheus-io/lezer-promql", "name": "@prometheus-io/lezer-promql",
"version": "0.53.0", "version": "0.53.1",
"license": "Apache-2.0", "license": "Apache-2.0",
"devDependencies": { "devDependencies": {
"@lezer/generator": "^1.7.0", "@lezer/generator": "^1.7.0",
@ -2027,9 +2027,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/@codemirror/autocomplete": { "node_modules/@codemirror/autocomplete": {
"version": "6.16.2", "version": "6.17.0",
"resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.16.2.tgz", "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz",
"integrity": "sha512-MjfDrHy0gHKlPWsvSsikhO1+BOh+eBHNgfH1OXs1+DAf30IonQldgMM3kxLDTG9ktE7kDLaA1j/l7KMPA4KNfw==", "integrity": "sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==",
"dependencies": { "dependencies": {
"@codemirror/language": "^6.0.0", "@codemirror/language": "^6.0.0",
"@codemirror/state": "^6.0.0", "@codemirror/state": "^6.0.0",
@ -2068,9 +2068,9 @@
} }
}, },
"node_modules/@codemirror/lint": { "node_modules/@codemirror/lint": {
"version": "6.8.0", "version": "6.8.1",
"resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.0.tgz", "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz",
"integrity": "sha512-lsFofvaw0lnPRJlQylNsC4IRt/1lI4OD/yYslrSGVndOJfStc58v+8p9dgGiD90ktOfL7OhBWns1ZETYgz0EJA==", "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==",
"dependencies": { "dependencies": {
"@codemirror/state": "^6.0.0", "@codemirror/state": "^6.0.0",
"@codemirror/view": "^6.0.0", "@codemirror/view": "^6.0.0",
@ -2093,9 +2093,9 @@
"integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==" "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
}, },
"node_modules/@codemirror/view": { "node_modules/@codemirror/view": {
"version": "6.27.0", "version": "6.28.3",
"resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.27.0.tgz", "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.3.tgz",
"integrity": "sha512-8kqX1sHbVW1lVzWwrjAbh4dR7eKhV8eIQ952JKaBXOoXE04WncoqCy4DMU701LSrPZ3N2Q4zsTawz7GQ+2mrUw==", "integrity": "sha512-QVqP+ko078/h9yrW+u5grX3rQhC+BkGKADRrlDaJznfPngJOv5zObiVf0+SgAWhL/Yt0nvZ+10rO3L+gU5IbFw==",
"dependencies": { "dependencies": {
"@codemirror/state": "^6.4.0", "@codemirror/state": "^6.4.0",
"style-mod": "^4.1.0", "style-mod": "^4.1.0",
@ -4199,9 +4199,9 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/@types/node": { "node_modules/@types/node": {
"version": "20.14.2", "version": "20.14.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.2.tgz", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.9.tgz",
"integrity": "sha512-xyu6WAMVwv6AKFLB+e/7ySZVr/0zLCzOa7rSpq6jNwpqOrUbcACDWC+53d4n2QHOnDou0fbIsg8wZu/sxrnI4Q==", "integrity": "sha512-06OCtnTXtWOZBJlRApleWndH4JsRVs1pDCc8dLSQp+7PpUpX3ePdHyeNSFTeSe7FtKyQkrlPvHwJOW3SLd8Oyg==",
"dependencies": { "dependencies": {
"undici-types": "~5.26.4" "undici-types": "~5.26.4"
} }
@ -16807,9 +16807,9 @@
"license": "CC0-1.0" "license": "CC0-1.0"
}, },
"node_modules/sass": { "node_modules/sass": {
"version": "1.77.4", "version": "1.77.6",
"resolved": "https://registry.npmjs.org/sass/-/sass-1.77.4.tgz", "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.6.tgz",
"integrity": "sha512-vcF3Ckow6g939GMA4PeU7b2K/9FALXk2KF9J87txdHzXbUF9XRQRwSxcAs/fGaTnJeBFd7UoV22j3lzMLdM0Pw==", "integrity": "sha512-ByXE1oLD79GVq9Ht1PeHWCPMPB8XHpBuz1r85oByKHjZY6qV6rWnQovQzXJXuQ/XyE1Oj3iPk3lo28uzaRA2/Q==",
"dependencies": { "dependencies": {
"chokidar": ">=3.0.0 <4.0.0", "chokidar": ">=3.0.0 <4.0.0",
"immutable": "^4.0.0", "immutable": "^4.0.0",
@ -18028,12 +18028,13 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/ts-jest": { "node_modules/ts-jest": {
"version": "29.1.4", "version": "29.2.2",
"resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.1.4.tgz", "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.2.2.tgz",
"integrity": "sha512-YiHwDhSvCiItoAgsKtoLFCuakDzDsJ1DLDnSouTaTmdOcOwIkSzbLXduaQ6M5DRVhuZC/NYaaZ/mtHbWMv/S6Q==", "integrity": "sha512-sSW7OooaKT34AAngP6k1VS669a0HdLxkQZnlC7T76sckGCokXFnvJ3yRlQZGRTAoV5K19HfSgCiSwWOSIfcYlg==",
"dev": true, "dev": true,
"dependencies": { "dependencies": {
"bs-logger": "0.x", "bs-logger": "0.x",
"ejs": "^3.0.0",
"fast-json-stable-stringify": "2.x", "fast-json-stable-stringify": "2.x",
"jest-util": "^29.0.0", "jest-util": "^29.0.0",
"json5": "^2.2.3", "json5": "^2.2.3",
@ -19331,15 +19332,15 @@
}, },
"react-app": { "react-app": {
"name": "@prometheus-io/app", "name": "@prometheus-io/app",
"version": "0.53.0", "version": "0.53.1",
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.16.2", "@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0", "@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2", "@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0", "@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6", "@codemirror/search": "^6.5.6",
"@codemirror/state": "^6.3.3", "@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.22.1", "@codemirror/view": "^6.28.3",
"@forevolve/bootstrap-dark": "^4.0.2", "@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2",
@ -19349,7 +19350,7 @@
"@lezer/lr": "^1.4.1", "@lezer/lr": "^1.4.1",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.53.0", "@prometheus-io/codemirror-promql": "0.53.1",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^9.0.6", "downshift": "^9.0.6",
@ -19368,7 +19369,7 @@
"react-test-renderer": "^17.0.2", "react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1", "reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0", "sanitize-html": "^2.13.0",
"sass": "1.77.4", "sass": "1.77.6",
"tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3" "tempusdominus-core": "^5.19.3"
}, },

View file

@ -17,7 +17,7 @@
}, },
"devDependencies": { "devDependencies": {
"@types/jest": "^29.5.12", "@types/jest": "^29.5.12",
"@types/node": "^20.14.2", "@types/node": "^20.14.9",
"eslint-config-prettier": "^9.1.0", "eslint-config-prettier": "^9.1.0",
"eslint-config-react-app": "^7.0.1", "eslint-config-react-app": "^7.0.1",
"eslint-plugin-prettier": "^4.2.1", "eslint-plugin-prettier": "^4.2.1",
@ -25,8 +25,8 @@
"jest-fetch-mock": "^3.0.3", "jest-fetch-mock": "^3.0.3",
"prettier": "^2.8.8", "prettier": "^2.8.8",
"react-scripts": "^5.0.1", "react-scripts": "^5.0.1",
"ts-jest": "^29.1.4", "ts-jest": "^29.2.2",
"typescript": "^4.9.5" "typescript": "^4.9.5"
}, },
"version": "0.53.0" "version": "0.53.1"
} }

View file

@ -1,15 +1,15 @@
{ {
"name": "@prometheus-io/app", "name": "@prometheus-io/app",
"version": "0.53.0", "version": "0.53.1",
"private": true, "private": true,
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.16.2", "@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0", "@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2", "@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.0", "@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6", "@codemirror/search": "^6.5.6",
"@codemirror/state": "^6.3.3", "@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.22.1", "@codemirror/view": "^6.28.3",
"@forevolve/bootstrap-dark": "^4.0.2", "@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2", "@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2", "@fortawesome/free-solid-svg-icons": "6.5.2",
@ -19,7 +19,7 @@
"@lezer/lr": "^1.4.1", "@lezer/lr": "^1.4.1",
"@nexucis/fuzzy": "^0.4.1", "@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1", "@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.53.0", "@prometheus-io/codemirror-promql": "0.53.1",
"bootstrap": "^4.6.2", "bootstrap": "^4.6.2",
"css.escape": "^1.5.1", "css.escape": "^1.5.1",
"downshift": "^9.0.6", "downshift": "^9.0.6",
@ -38,7 +38,7 @@
"react-test-renderer": "^17.0.2", "react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1", "reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0", "sanitize-html": "^2.13.0",
"sass": "1.77.4", "sass": "1.77.6",
"tempusdominus-bootstrap-4": "^5.39.2", "tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3" "tempusdominus-core": "^5.19.3"
}, },