Merge remote-tracking branch 'prometheus/main' into feat/info-function-mvp

Signed-off-by: Arve Knudsen <arve.knudsen@gmail.com>
Author: Arve Knudsen <arve.knudsen@gmail.com>, 2024-09-19 08:26:17 +02:00
Commit: 6971656733
78 changed files with 4356 additions and 1243 deletions


@ -3,6 +3,7 @@
## unreleased
* [FEATURE] PromQL: Add `info` function. #14495
* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
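The `info` entry above is the headline change of this branch. Below is a hypothetical test sketch, written in the style of the engine tests later in this diff, of how the new function might be exercised; the series data, the `target_info` join behavior, the experimental-function gate, and the expected output labels are assumptions based on the changelog entry, not something this diff confirms.

```go
// Hypothetical sketch, not part of this commit. Assumes `info` joins data
// labels from a `target_info`-style info metric onto the input vector and
// that experimental PromQL functions must be enabled.
func TestInfoFunctionSketch(t *testing.T) {
	parser.EnableExperimentalFunctions = true // assumed gate
	store := promqltest.LoadedStorage(t, `
load 1m
  http_requests_total{job="api", instance="a"} 0+10x10
  target_info{job="api", instance="a", k8s_cluster_name="us-east"} 1x10
`)
	t.Cleanup(func() { _ = store.Close() })

	engine := newTestEngine(t) // helper used by the engine tests in this diff
	query, err := engine.NewInstantQuery(context.Background(), store, nil,
		`info(rate(http_requests_total[5m]))`, timestamp.Time(0).Add(10*time.Minute))
	require.NoError(t, err)
	t.Cleanup(query.Close)

	res := query.Exec(context.Background())
	require.NoError(t, res.Err)
	// Assumed expectation: the result carries k8s_cluster_name="us-east"
	// joined from target_info onto the rate() result.
}
```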
## 3.0.0-beta.0 / 2024-09-05


@ -30,6 +30,11 @@ include Makefile.common
DOCKER_IMAGE_NAME ?= prometheus
# Only build UI if PREBUILT_ASSETS_STATIC_DIR is not set
ifdef PREBUILT_ASSETS_STATIC_DIR
SKIP_UI_BUILD = true
endif
.PHONY: update-npm-deps
update-npm-deps:
@echo ">> updating npm dependencies"
@ -75,8 +80,24 @@ ui-lint:
cd $(UI_PATH)/react-app && npm run lint
.PHONY: assets
ifndef SKIP_UI_BUILD
assets: ui-install ui-build
.PHONY: npm_licenses
npm_licenses: ui-install
@echo ">> bundling npm licenses"
rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
ln -s . npm_licenses
find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
rm -f npm_licenses
else
assets:
@echo '>> skipping assets build, pre-built assets provided'
npm_licenses:
@echo '>> skipping assets npm licenses, pre-built assets provided'
endif
.PHONY: assets-compress
assets-compress: assets
@echo '>> compressing assets'
@ -125,14 +146,6 @@ else
test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
endif
.PHONY: npm_licenses
npm_licenses: ui-install
@echo ">> bundling npm licenses"
rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
ln -s . npm_licenses
find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
rm -f npm_licenses
.PHONY: tarball
tarball: npm_licenses common-tarball


@ -275,3 +275,9 @@ $(1)_precheck:
exit 1; \
fi
endef
govulncheck: install-govulncheck
govulncheck ./...
install-govulncheck:
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest


@ -230,6 +230,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "ooo-native-histograms":
c.tsdb.EnableOOONativeHistograms = true
level.Info(logger).Log("msg", "Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
case "created-timestamp-zero-ingestion":
c.scrape.EnableCreatedTimestampZeroIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
@ -471,7 +474,7 @@ func main() {
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
Default("").StringsVar(&cfg.featureList)
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
@ -980,7 +983,7 @@ func main() {
},
func(err error) {
close(cancel)
webHandler.SetReady(false)
webHandler.SetReady(web.Stopping)
},
)
}
@ -1159,7 +1162,7 @@ func main() {
reloadReady.Close()
webHandler.SetReady(true)
webHandler.SetReady(web.Ready)
level.Info(logger).Log("msg", "Server is ready to receive web requests.")
<-cancel
return nil
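These two hunks replace the old boolean `SetReady(false)`/`SetReady(true)` calls with named states. A sketch of what that implies is below; only `web.Ready` and `web.Stopping` are visible in this diff, so the type name and the remaining values are guesses, not the actual API.

```go
// Assumed shape of the readiness states implied by this hunk: SetReady now
// takes a named state rather than a bool, so shutdown can report "stopping"
// instead of merely "not ready".
type ReadyStatus int

const (
	NotReady ReadyStatus = iota // assumed zero value
	Ready                       // serving; previously SetReady(true)
	Stopping                    // shutting down; previously SetReady(false)
)
```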
@ -1735,6 +1738,7 @@ type tsdbOptions struct {
EnableNativeHistograms bool
EnableDelayedCompaction bool
EnableOverlappingCompaction bool
EnableOOONativeHistograms bool
}
func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
@ -1754,6 +1758,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
MaxExemplars: opts.MaxExemplars,
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms,
EnableOOONativeHistograms: opts.EnableOOONativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
EnableDelayedCompaction: opts.EnableDelayedCompaction,
EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
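A minimal sketch of the wiring these hunks add, assuming the surrounding `cfg`, `logger`, and `localStoragePath` variables from main.go and the `tsdb.Open` call shape; only the two new fields are from this diff.

```go
// Sketch: --enable-feature=ooo-native-histograms sets the flag (case above),
// and ToTSDBOptions copies it into tsdb.Options. The Open call is assumed.
cfg.tsdb.EnableNativeHistograms = true                            // prerequisite
cfg.tsdb.EnableOOONativeHistograms = true                         // new in this diff
cfg.tsdb.OutOfOrderTimeWindow = (10 * time.Minute).Milliseconds() // must be > 0 to take effect
opts := cfg.tsdb.ToTSDBOptions()
db, err := tsdb.Open(localStoragePath, logger, prometheus.DefaultRegisterer, &opts, nil)
if err != nil {
	panic(err)
}
defer db.Close()
```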


@ -56,7 +56,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |


@ -266,9 +266,11 @@ params:
# Sets the `Authorization` header on every scrape request with the
# configured username and password.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -684,11 +686,13 @@ subscription_id: <string>
# Authentication information used to authenticate to the Azure API.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `username` and `username_file` are mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information, currently not supported by Azure.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -809,11 +813,13 @@ tags:
# Authentication information used to authenticate to the consul server.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `username` and `username_file` are mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -907,11 +913,13 @@ The following meta labels are available on targets during [relabeling](#relabel_
# Authentication information used to authenticate to the API server.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information, not currently supported by DigitalOcean.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -1055,11 +1063,13 @@ tls_config:
# Authentication information used to authenticate to the Docker daemon.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -1244,11 +1254,13 @@ role: <string>
# Authentication information used to authenticate to the Docker daemon.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -1391,11 +1403,13 @@ filters:
# Authentication information used to authenticate to the EC2 API.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `username` and `username_file` are mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information, currently not supported by AWS.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -1684,6 +1698,7 @@ tls_config:
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -1915,12 +1930,14 @@ role: <string>
# Authentication information used to authenticate to the API server.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information, required when role is robot
# Role hcloud does not support basic auth.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2024,11 +2041,13 @@ url: <string>
# Authentication information used to authenticate to the API server.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `username` and `username_file` are mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2116,12 +2135,14 @@ datacenter_id: <string>
# Authentication information used to authenticate to the API server.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information, required when using IONOS
# Cloud username and password as authentication method.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2368,11 +2389,13 @@ role: <string>
# Optional authentication information used to authenticate to the API server.
# Note that `basic_auth` and `authorization` options are mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2526,11 +2549,13 @@ tls_config:
# Authentication information used to authenticate to the Docker daemon.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2611,11 +2636,13 @@ See below for the configuration options for Lightsail discovery:
# Authentication information used to authenticate to the Lightsail API.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `username` and `username_file` are mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information, currently not supported by AWS.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2707,12 +2734,14 @@ The following meta labels are available on targets during [relabeling](#relabel_
# Authentication information used to authenticate to the API server.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Note: Linode APIv4 Token must be created with scopes: 'linodes:read_only', 'ips:read_only', and 'events:read_only'
# Optional HTTP basic authentication information, not currently supported by Linode APIv4.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2820,9 +2849,11 @@ servers:
# Sets the `Authorization` header on every request with the
# configured username and password.
# This is mutually exclusive with other authentication mechanisms.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -2942,11 +2973,13 @@ The following meta labels are available on targets during [relabeling](#relabel_
# Authentication information used to authenticate to the nomad server.
# Note that `basic_auth`, `authorization` and `oauth2` options are
# mutually exclusive.
# `username` and `username_file` are mutually exclusive.
# `password` and `password_file` are mutually exclusive.
# Optional HTTP basic authentication information.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -3137,9 +3170,11 @@ server: <string>
# Sets the `Authorization` header on every request with the
# configured username and password.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -3370,6 +3405,7 @@ password: <secret>
# Optional HTTP basic authentication information, currently not supported by Uyuni.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -3459,11 +3495,13 @@ The following meta labels are available on targets during [relabeling](#relabel_
# Authentication information used to authenticate to the API server.
# Note that `basic_auth` and `authorization` options are
# mutually exclusive.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
# Optional HTTP basic authentication information, not currently supported by Vultr.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -3675,9 +3713,11 @@ through the `__alerts_path__` label.
# Sets the `Authorization` header on every request with the
# configured username and password.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -3921,9 +3961,11 @@ write_relabel_configs:
# Sets the `Authorization` header on every remote write request with the
# configured username and password.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]
@ -4102,9 +4144,11 @@ headers:
# Sets the `Authorization` header on every remote read request with the
# configured username and password.
# username and username_file are mutually exclusive.
# password and password_file are mutually exclusive.
basic_auth:
[ username: <string> ]
[ username_file: <string> ]
[ password: <secret> ]
[ password_file: <string> ]


@ -47,16 +47,6 @@ When enabled, for each instance scrape, Prometheus stores a sample in the follow
to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.
## Prometheus agent
`--enable-feature=agent`
When enabled, Prometheus runs in agent mode. The agent mode is limited to
discovery, scrape and remote write.
This is useful when you do not need to query the Prometheus data locally, but
only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
## Per-step stats
`--enable-feature=promql-per-step-stats`

go.mod

@ -27,7 +27,7 @@ require (
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.6.0
github.com/go-openapi/strfmt v0.23.0
github.com/go-zookeeper/zk v1.0.3
github.com/go-zookeeper/zk v1.0.4
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.6.0

go.sum

@ -238,8 +238,8 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=


@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
// available, even if the string matcher is faster.
m.matchString = m.stringMatcher.Matches
} else {
parsed, err := syntax.Parse(v, syntax.Perl)
parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL)
if err != nil {
return nil, err
}
// Simplify the syntax tree to run faster.
parsed = parsed.Simplify()
m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$")
m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$")
if err != nil {
return nil, err
}
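To make the behavioral shift in this hunk concrete, here is a small self-contained Go program mirroring the two changed lines: parsing with `syntax.DotNL` and anchoring with `(?s:...)` lets `.` match newlines, where the old `(?:...)` anchor did not.

```go
package main

import (
	"fmt"
	"regexp"
	"regexp/syntax"
)

func main() {
	pattern := "line1.*line2"

	// Old behavior: '.' does not match '\n'.
	old := regexp.MustCompile("^(?:" + pattern + ")$")

	// New behavior (this diff): parse with syntax.DotNL and anchor with
	// (?s:...), so '.' matches any character, including newline.
	parsed, err := syntax.Parse(pattern, syntax.Perl|syntax.DotNL)
	if err != nil {
		panic(err)
	}
	updated := regexp.MustCompile("^(?s:" + parsed.String() + ")$")

	fmt.Println(old.MatchString("line1\nline2"))     // false
	fmt.Println(updated.MatchString("line1\nline2")) // true
}
```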


@ -121,7 +121,7 @@ func TestFastRegexMatcher_MatchString(t *testing.T) {
t.Parallel()
m, err := NewFastRegexMatcher(r)
require.NoError(t, err)
re := regexp.MustCompile("^(?:" + r + ")$")
re := regexp.MustCompile("^(?s:" + r + ")$")
require.Equal(t, re.MatchString(v), m.MatchString(v))
})
}
@ -167,7 +167,7 @@ func TestOptimizeConcatRegex(t *testing.T) {
}
for _, c := range cases {
parsed, err := syntax.Parse(c.regex, syntax.Perl)
parsed, err := syntax.Parse(c.regex, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
prefix, suffix, contains := optimizeConcatRegex(parsed)
@ -248,7 +248,7 @@ func TestFindSetMatches(t *testing.T) {
c := c
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matches, actualCaseSensitive := findSetMatches(parsed)
require.Equal(t, c.expMatches, matches)
@ -348,15 +348,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
pattern string
exp StringMatcher
}{
{".*", anyStringWithoutNewlineMatcher{}},
{".*?", anyStringWithoutNewlineMatcher{}},
{".*", trueMatcher{}},
{".*?", trueMatcher{}},
{"(?s:.*)", trueMatcher{}},
{"(.*)", anyStringWithoutNewlineMatcher{}},
{"^.*$", anyStringWithoutNewlineMatcher{}},
{".+", &anyNonEmptyStringMatcher{matchNL: false}},
{"(.*)", trueMatcher{}},
{"^.*$", trueMatcher{}},
{".+", &anyNonEmptyStringMatcher{matchNL: true}},
{"(?s:.+)", &anyNonEmptyStringMatcher{matchNL: true}},
{"^.+$", &anyNonEmptyStringMatcher{matchNL: false}},
{"(.+)", &anyNonEmptyStringMatcher{matchNL: false}},
{"^.+$", &anyNonEmptyStringMatcher{matchNL: true}},
{"(.+)", &anyNonEmptyStringMatcher{matchNL: true}},
{"", emptyStringMatcher{}},
{"^$", emptyStringMatcher{}},
{"^foo$", &equalStringMatcher{s: "foo", caseSensitive: true}},
@ -366,23 +366,23 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{`(?i:((foo1|foo2|bar)))`, orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})},
{"^((?i:foo|oo)|(bar))$", orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO", caseSensitive: false}, &equalStringMatcher{s: "OO", caseSensitive: false}, &equalStringMatcher{s: "bar", caseSensitive: true}})},
{"(?i:(foo1|foo2|bar))", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "FOO1", caseSensitive: false}, &equalStringMatcher{s: "FOO2", caseSensitive: false}}), &equalStringMatcher{s: "BAR", caseSensitive: false}})},
{".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: anyStringWithoutNewlineMatcher{}}},
{"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: anyStringWithoutNewlineMatcher{}}},
{"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{".*foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"(.*)foo.*", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"(.*)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"(.+)foo(.*)", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: trueMatcher{}}},
{"^.+foo.+", &containsStringMatcher{substrings: []string{"foo"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^(.*)(foo)(.*)$", &containsStringMatcher{substrings: []string{"foo"}, left: trueMatcher{}, right: trueMatcher{}}},
{"^(.*)(foo|foobar)(.*)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: trueMatcher{}}},
{"^(.*)(foo|foobar)(.+)$", &containsStringMatcher{substrings: []string{"foo", "foobar"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^(.*)(bar|b|buzz)(.+)$", &containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"10\\.0\\.(1|2)\\.+", nil},
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}},
{"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: anyStringWithoutNewlineMatcher{}}},
{"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: false}}},
{"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: anyStringWithoutNewlineMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: false}}, &equalStringMatcher{s: "foo", caseSensitive: true}})},
{"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: false}}})},
{"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: false}, right: nil}},
{"10\\.0\\.(1|2).+", &containsStringMatcher{substrings: []string{"10.0.1", "10.0.2"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^.+foo", &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{matchNL: true}, suffix: "foo", suffixCaseSensitive: true}},
{"foo-.*$", &literalPrefixSensitiveStringMatcher{prefix: "foo-", right: trueMatcher{}}},
{"(prometheus|api_prom)_api_v1_.+", &containsStringMatcher{substrings: []string{"prometheus_api_v1_", "api_prom_api_v1_"}, left: nil, right: &anyNonEmptyStringMatcher{matchNL: true}}},
{"^((.*)(bar|b|buzz)(.+)|foo)$", orStringMatcher([]StringMatcher{&containsStringMatcher{substrings: []string{"bar", "b", "buzz"}, left: trueMatcher{}, right: &anyNonEmptyStringMatcher{matchNL: true}}, &equalStringMatcher{s: "foo", caseSensitive: true}})},
{"((fo(bar))|.+foo)", orStringMatcher([]StringMatcher{orStringMatcher([]StringMatcher{&equalStringMatcher{s: "fobar", caseSensitive: true}}), &literalSuffixStringMatcher{suffix: "foo", suffixCaseSensitive: true, left: &anyNonEmptyStringMatcher{matchNL: true}}})},
{"(.+)/(gateway|cortex-gw|cortex-gw-internal)", &containsStringMatcher{substrings: []string{"/gateway", "/cortex-gw", "/cortex-gw-internal"}, left: &anyNonEmptyStringMatcher{matchNL: true}, right: nil}},
// we don't support case insensitive matching for contains.
// This is because there's no strings.IndexOfFold function.
// We can revisit later if this is really popular by using strings.ToUpper.
@ -393,15 +393,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{".*foo.*bar.*", nil},
{`\d*`, nil},
{".", nil},
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: anyStringWithoutNewlineMatcher{}}}}},
{"/|/bar.*", &literalPrefixSensitiveStringMatcher{prefix: "/", right: orStringMatcher{emptyStringMatcher{}, &literalPrefixSensitiveStringMatcher{prefix: "bar", right: trueMatcher{}}}}},
// This one is not supported because `stringMatcherFromRegexp` is not reentrant for syntax.OpConcat.
// It would make the code too complex to handle it.
{"(.+)/(foo.*|bar$)", nil},
// Case sensitive alternate with same literal prefix and .* suffix.
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: anyStringWithoutNewlineMatcher{}}}}},
{"(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixSensitiveStringMatcher{prefix: "xyz-016a-ixb-", right: orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "dp", right: trueMatcher{}}, &literalPrefixSensitiveStringMatcher{prefix: "op", right: trueMatcher{}}}}},
// Case insensitive alternate with same literal prefix and .* suffix.
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: anyStringWithoutNewlineMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: anyStringWithoutNewlineMatcher{}}}}},
{"(?i:(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*))", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}},
{"(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)", &literalPrefixInsensitiveStringMatcher{prefix: "XYZ-016A-IXB-", right: orStringMatcher{&literalPrefixInsensitiveStringMatcher{prefix: "DP", right: trueMatcher{}}, &literalPrefixInsensitiveStringMatcher{prefix: "OP", right: trueMatcher{}}}}},
// Concatenated variable length selectors are not supported.
{"foo.*.*", nil},
{"foo.+.+", nil},
@ -410,15 +410,15 @@ func TestStringMatcherFromRegexp(t *testing.T) {
{"aaa.?.?", nil},
{"aaa.?.*", nil},
// Regexps with ".?".
{"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"ext.?|xfs", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"(?s)(ext.?|xfs)", orStringMatcher{&literalPrefixSensitiveStringMatcher{prefix: "ext", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}, &equalStringMatcher{s: "xfs", caseSensitive: true}}},
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: false}}},
{"foo.?", &literalPrefixSensitiveStringMatcher{prefix: "foo", right: &zeroOrOneCharacterStringMatcher{matchNL: true}}},
{"f.?o", nil},
} {
c := c
t.Run(c.pattern, func(t *testing.T) {
t.Parallel()
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matches := stringMatcherFromRegexp(parsed)
require.Equal(t, c.exp, matches)
@ -437,16 +437,16 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
{
pattern: "(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)",
expectedLiteralPrefixMatchers: 3,
expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "xyz-016a-ixb-dp\n"},
expectedMatches: []string{"xyz-016a-ixb-dp", "xyz-016a-ixb-dpXXX", "xyz-016a-ixb-op", "xyz-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "xyz-016a-ixb-d", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"},
},
// Case insensitive.
{
pattern: "(?i)(xyz-016a-ixb-dp.*|xyz-016a-ixb-op.*)",
expectedLiteralPrefixMatchers: 3,
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX"},
expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp", "xyz-016a-ixb-dp\n"},
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dpXXX", "xyz-016a-ixb-op", "XYZ-016a-ixb-opXXX", "xyz-016a-ixb-dp\n"},
expectedNotMatches: []string{"xyz-016a-ixb-d", "xyz", "dp"},
},
// Nested literal prefixes, case sensitive.
@ -474,13 +474,13 @@ func TestStringMatcherFromRegexp_LiteralPrefix(t *testing.T) {
},
} {
t.Run(c.pattern, func(t *testing.T) {
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matcher := stringMatcherFromRegexp(parsed)
require.NotNil(t, matcher)
re := regexp.MustCompile("^" + c.pattern + "$")
re := regexp.MustCompile("^(?s:" + c.pattern + ")$")
// Pre-condition check: ensure it contains literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher.
numPrefixMatchers := 0
@ -523,16 +523,16 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) {
{
pattern: "(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)",
expectedLiteralSuffixMatchers: 2,
expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"},
expectedMatches: []string{"xyz-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "xyz-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"},
expectedNotMatches: []string{"XYZ-016a-ixb-dp", "yz-016a-ixb-dp", "XYZ-016a-ixb-op", "xyz-016a-ixb-o", "xyz", "dp"},
},
// Case insensitive.
{
pattern: "(?i)(.*xyz-016a-ixb-dp|.*xyz-016a-ixb-op)",
expectedLiteralSuffixMatchers: 2,
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op"},
expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp", "\nxyz-016a-ixb-dp"},
expectedMatches: []string{"xyz-016a-ixb-dp", "XYZ-016a-ixb-dp", "XXXxyz-016a-ixb-dp", "XyZ-016a-ixb-op", "XXXxyz-016a-ixb-op", "\nxyz-016a-ixb-dp"},
expectedNotMatches: []string{"yz-016a-ixb-dp", "xyz-016a-ixb-o", "xyz", "dp"},
},
// Nested literal suffixes, case sensitive.
@ -552,13 +552,13 @@ func TestStringMatcherFromRegexp_LiteralSuffix(t *testing.T) {
},
} {
t.Run(c.pattern, func(t *testing.T) {
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matcher := stringMatcherFromRegexp(parsed)
require.NotNil(t, matcher)
re := regexp.MustCompile("^" + c.pattern + "$")
re := regexp.MustCompile("^(?s:" + c.pattern + ")$")
// Pre-condition check: ensure it contains literalSuffixStringMatcher.
numSuffixMatchers := 0
@ -598,26 +598,26 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) {
{
pattern: "test.?",
expectedZeroOrOneMatchers: 1,
expectedMatches: []string{"test", "test!"},
expectedNotMatches: []string{"test\n", "tes", "test!!"},
expectedMatches: []string{"test\n", "test", "test!"},
expectedNotMatches: []string{"tes", "test!!"},
},
{
pattern: ".?test",
expectedZeroOrOneMatchers: 1,
expectedMatches: []string{"test", "!test"},
expectedNotMatches: []string{"\ntest", "tes", "test!"},
expectedMatches: []string{"\ntest", "test", "!test"},
expectedNotMatches: []string{"tes", "test!"},
},
{
pattern: "(aaa.?|bbb.?)",
expectedZeroOrOneMatchers: 2,
expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX"},
expectedNotMatches: []string{"aa", "aaaXX", "aaa\n", "bb", "bbbXX", "bbb\n"},
expectedMatches: []string{"aaa", "aaaX", "bbb", "bbbX", "aaa\n", "bbb\n"},
expectedNotMatches: []string{"aa", "aaaXX", "bb", "bbbXX"},
},
{
pattern: ".*aaa.?",
expectedZeroOrOneMatchers: 1,
expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX"},
expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX", "XXXaaa\n"},
expectedMatches: []string{"aaa", "Xaaa", "aaaX", "XXXaaa", "XXXaaaX", "XXXaaa\n"},
expectedNotMatches: []string{"aa", "aaaXX", "XXXaaaXXX"},
},
// Match newline.
@ -632,18 +632,18 @@ func TestStringMatcherFromRegexp_Quest(t *testing.T) {
{
pattern: "(aaa.?|((?s).?bbb.+))",
expectedZeroOrOneMatchers: 2,
expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX"},
expectedNotMatches: []string{"aa", "aaa\n", "Xbbb", "\nbbb"},
expectedMatches: []string{"aaa", "aaaX", "bbbX", "XbbbX", "bbbXXX", "\nbbbX", "aaa\n"},
expectedNotMatches: []string{"aa", "Xbbb", "\nbbb"},
},
} {
t.Run(c.pattern, func(t *testing.T) {
parsed, err := syntax.Parse(c.pattern, syntax.Perl)
parsed, err := syntax.Parse(c.pattern, syntax.Perl|syntax.DotNL)
require.NoError(t, err)
matcher := stringMatcherFromRegexp(parsed)
require.NotNil(t, matcher)
re := regexp.MustCompile("^" + c.pattern + "$")
re := regexp.MustCompile("^(?s:" + c.pattern + ")$")
// Pre-condition check: ensure it contains zeroOrOneCharacterStringMatcher.
numZeroOrOneMatchers := 0
@ -1112,7 +1112,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) {
}
b.Logf("regexp: %s", re)
parsed, err := syntax.Parse(re, syntax.Perl)
parsed, err := syntax.Parse(re, syntax.Perl|syntax.DotNL)
require.NoError(b, err)
unoptimized := stringMatcherFromRegexpInternal(parsed)


@ -171,7 +171,7 @@ type Regexp struct {
// NewRegexp creates a new anchored Regexp and returns an error if the
// passed-in regular expression does not compile.
func NewRegexp(s string) (Regexp, error) {
regex, err := regexp.Compile("^(?:" + s + ")$")
regex, err := regexp.Compile("^(?s:" + s + ")$")
return Regexp{Regexp: regex}, err
}
@ -218,8 +218,8 @@ func (re Regexp) String() string {
}
str := re.Regexp.String()
// Trim the anchor `^(?:` prefix and `)$` suffix.
return str[4 : len(str)-2]
// Trim the anchor `^(?s:` prefix and `)$` suffix.
return str[5 : len(str)-2]
}
// Process returns a relabeled version of the given label set. The relabel configurations
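A round-trip sketch of the trimming change above: the anchor prefix grew from four characters (`^(?:`) to five (`^(?s:`), so `String()` now slices `[5 : len-2]` to recover the expression the user wrote.

```go
// Minimal sketch using the package's own NewRegexp from the hunk above.
re, err := relabel.NewRegexp("foo.*bar")
if err != nil {
	panic(err)
}
fmt.Println(re.Regexp.String()) // ^(?s:foo.*bar)$
fmt.Println(re.String())        // foo.*bar
```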


@ -569,6 +569,29 @@ func TestRelabel(t *testing.T) {
},
drop: true,
},
{
input: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
}),
relabel: []*Config{
{
SourceLabels: model.LabelNames{"a"},
Regex: MustNewRegexp("line1.*line2"),
TargetLabel: "d",
Separator: ";",
Replacement: "match${1}",
Action: Replace,
},
},
output: labels.FromMap(map[string]string{
"a": "line1\nline2",
"b": "bar",
"c": "baz",
"d": "match",
}),
},
}
for _, test := range tests {


@ -1819,9 +1819,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value,
ev.samplesStats.UpdatePeak(ev.currentSamples)
if e.Func.Name == "rate" || e.Func.Name == "increase" {
samples := inMatrix[0]
metricName := samples.Metric.Get(labels.MetricName)
if metricName != "" && len(samples.Floats) > 0 &&
metricName := inMatrix[0].Metric.Get(labels.MetricName)
if metricName != "" && len(ss.Floats) > 0 &&
!strings.HasSuffix(metricName, "_total") &&
!strings.HasSuffix(metricName, "_sum") &&
!strings.HasSuffix(metricName, "_count") &&


@ -19,6 +19,7 @@ import (
"fmt"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
@ -3708,3 +3709,75 @@ histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:
},
})
}
func TestRateAnnotations(t *testing.T) {
testCases := map[string]struct {
data string
expr string
expectedWarningAnnotations []string
expectedInfoAnnotations []string
}{
"info annotation when two samples are selected": {
data: `
series 1 2
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{
`PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "series" (1:6)`,
},
},
"no info annotations when no samples": {
data: `
series
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{},
},
"no info annotations when selecting one sample": {
data: `
series 1 2
`,
expr: "rate(series[10s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{},
},
"no info annotations when no samples due to mixed data types": {
data: `
series{label="a"} 1 {{schema:1 sum:15 count:10 buckets:[1 2 3]}}
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{
`PromQL warning: encountered a mix of histograms and floats for metric name "series" (1:6)`,
},
expectedInfoAnnotations: []string{},
},
"no info annotations when selecting two native histograms": {
data: `
series{label="a"} {{schema:1 sum:10 count:5 buckets:[1 2 3]}} {{schema:1 sum:15 count:10 buckets:[1 2 3]}}
`,
expr: "rate(series[1m1s])",
expectedWarningAnnotations: []string{},
expectedInfoAnnotations: []string{},
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
store := promqltest.LoadedStorage(t, "load 1m\n"+strings.TrimSpace(testCase.data))
t.Cleanup(func() { _ = store.Close() })
engine := newTestEngine(t)
query, err := engine.NewInstantQuery(context.Background(), store, nil, testCase.expr, timestamp.Time(0).Add(1*time.Minute))
require.NoError(t, err)
t.Cleanup(query.Close)
res := query.Exec(context.Background())
require.NoError(t, res.Err)
warnings, infos := res.Warnings.AsStrings(testCase.expr, 0, 0)
testutil.RequireEqual(t, testCase.expectedWarningAnnotations, warnings)
testutil.RequireEqual(t, testCase.expectedInfoAnnotations, infos)
})
}
}


@ -1480,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
regexStr = stringFromArg(args[4])
)
regex, err := regexp.Compile("^(?:" + regexStr + ")$")
regex, err := regexp.Compile("^(?s:" + regexStr + ")$")
if err != nil {
panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr))
}


@ -4,6 +4,12 @@
set -euo pipefail
export STATIC_DIR=static
PREBUILT_ASSETS_STATIC_DIR=${PREBUILT_ASSETS_STATIC_DIR:-}
if [ -n "$PREBUILT_ASSETS_STATIC_DIR" ]; then
STATIC_DIR=$(realpath $PREBUILT_ASSETS_STATIC_DIR)
fi
cd web/ui
cp embed.go.tmpl embed.go
@ -11,6 +17,19 @@ GZIP_OPTS="-fk"
# gzip option '-k' may not always exist in the latest gzip available on different distros.
if ! gzip -k -h &>/dev/null; then GZIP_OPTS="-f"; fi
mkdir -p static
find static -type f -name '*.gz' -delete
find static -type f -exec gzip $GZIP_OPTS '{}' \; -print0 | xargs -0 -I % echo %.gz | sort | xargs echo //go:embed >> embed.go
# Compress files from the prebuilt static directory and replicate the structure in the current static directory
find "${STATIC_DIR}" -type f ! -name '*.gz' -exec bash -c '
for file; do
dest="${file#${STATIC_DIR}}"
mkdir -p "static/$(dirname "$dest")"
gzip '"$GZIP_OPTS"' "$file" -c > "static/${dest}.gz"
done
' bash {} +
# Append the paths of gzipped files to embed.go
find static -type f -name '*.gz' -print0 | sort -z | xargs -0 echo //go:embed >> embed.go
echo var EmbedFS embed.FS >> embed.go


@ -43,6 +43,7 @@ var (
ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength)
ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0")
ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled")
ErrOOONativeHistogramsDisabled = fmt.Errorf("out-of-order native histogram ingestion is disabled")
// ErrOutOfOrderCT indicates a failed append of a CT to the storage
// due to the CT being older than the newest sample.
@ -157,7 +158,7 @@ type ChunkQuerier interface {
// LabelQuerier provides querying access over labels.
type LabelQuerier interface {
// LabelValues returns all potential values for a label name.
// LabelValues returns all potential values for a label name in sorted order.
// It is not safe to use the strings beyond the lifetime of the querier.
// If matchers are specified the returned result set is reduced
// to label values of metrics matching the matchers.


@ -1859,13 +1859,6 @@ func BenchmarkBuildWriteRequest(b *testing.B) {
}
pBuf := proto.NewBuffer(nil)
// Warmup buffers
for i := 0; i < 10; i++ {
populateTimeSeries(batch, seriesBuff, true, true)
buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, &buff, nil, "snappy")
}
b.ResetTimer()
totalSize := 0
for i := 0; i < b.N; i++ {
populateTimeSeries(batch, seriesBuff, true, true)
@ -1897,45 +1890,43 @@ func BenchmarkBuildWriteRequest(b *testing.B) {
func BenchmarkBuildV2WriteRequest(b *testing.B) {
noopLogger := log.NewNopLogger()
type testcase struct {
batch []timeSeries
}
testCases := []testcase{
{createDummyTimeSeries(2)},
{createDummyTimeSeries(10)},
{createDummyTimeSeries(100)},
}
for _, tc := range testCases {
bench := func(b *testing.B, batch []timeSeries) {
symbolTable := writev2.NewSymbolTable()
buff := make([]byte, 0)
seriesBuff := make([]writev2.TimeSeries, len(tc.batch))
seriesBuff := make([]writev2.TimeSeries, len(batch))
for i := range seriesBuff {
seriesBuff[i].Samples = []writev2.Sample{{}}
seriesBuff[i].Exemplars = []writev2.Exemplar{{}}
}
pBuf := []byte{}
// Warmup buffers
for i := 0; i < 10; i++ {
populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true)
buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
}
b.Run(fmt.Sprintf("%d-instances", len(tc.batch)), func(b *testing.B) {
totalSize := 0
for j := 0; j < b.N; j++ {
populateV2TimeSeries(&symbolTable, tc.batch, seriesBuff, true, true)
b.ResetTimer()
req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
if err != nil {
b.Fatal(err)
}
symbolTable.Reset()
totalSize += len(req)
b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
totalSize := 0
for i := 0; i < b.N; i++ {
populateV2TimeSeries(&symbolTable, batch, seriesBuff, true, true)
req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, &buff, nil, "snappy")
if err != nil {
b.Fatal(err)
}
})
totalSize += len(req)
b.ReportMetric(float64(totalSize)/float64(b.N), "compressedSize/op")
}
}
twoBatch := createDummyTimeSeries(2)
tenBatch := createDummyTimeSeries(10)
hundredBatch := createDummyTimeSeries(100)
b.Run("2 instances", func(b *testing.B) {
bench(b, twoBatch)
})
b.Run("10 instances", func(b *testing.B) {
bench(b, tenBatch)
})
b.Run("1k instances", func(b *testing.B) {
bench(b, hundredBatch)
})
}
func TestDropOldTimeSeries(t *testing.T) {


@ -334,7 +334,7 @@ func TestStreamReadEndpoint(t *testing.T) {
Type: prompb.Chunk_XOR,
MinTimeMs: 7200000,
MaxTimeMs: 7200000,
Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000\000"),
Data: []byte("\000\001\200\364\356\006@\307p\000\000\000\000\000"),
},
},
},
@ -381,7 +381,7 @@ func TestStreamReadEndpoint(t *testing.T) {
Type: prompb.Chunk_XOR,
MinTimeMs: 14400000,
MaxTimeMs: 14400000,
Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000\000"),
Data: []byte("\000\001\200\350\335\r@\327p\000\000\000\000\000"),
},
},
},


@ -22,6 +22,7 @@ import (
"math/rand"
"os"
"path/filepath"
"slices"
"sort"
"strconv"
"testing"
@ -151,7 +152,7 @@ func TestCorruptedChunk(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.Truncate(fi.Size()-1))
},
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: segment doesn't include enough bytes to read the chunk - required:26, available:25"),
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: segment doesn't include enough bytes to read the chunk - required:25, available:24"),
},
{
name: "checksum mismatch",
@ -169,7 +170,7 @@ func TestCorruptedChunk(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, n)
},
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:cfc0526c, actual:34815eae"),
iterErr: errors.New("cannot populate chunk 8 from block 00000000000000000000000000: checksum mismatch expected:231bddcf, actual:d85ad10d"),
},
} {
t.Run(tc.name, func(t *testing.T) {
@ -310,6 +311,33 @@ func TestLabelValuesWithMatchers(t *testing.T) {
}
}
func TestBlockQuerierReturnsSortedLabelValues(t *testing.T) {
tmpdir := t.TempDir()
ctx := context.Background()
var seriesEntries []storage.Series
for i := 100; i > 0; i-- {
seriesEntries = append(seriesEntries, storage.NewListSeries(labels.FromStrings(
"__name__", fmt.Sprintf("value%d", i),
), []chunks.Sample{sample{100, 0, nil, nil}}))
}
blockDir := createBlock(t, tmpdir, seriesEntries)
// Check open err.
block, err := OpenBlock(nil, blockDir, nil)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, block.Close()) })
q, err := newBlockBaseQuerier(block, 0, 100)
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, q.Close()) })
res, _, err := q.LabelValues(ctx, "__name__", nil)
require.NoError(t, err)
require.True(t, slices.IsSorted(res))
}
// TestBlockSize ensures that the block size is calculated correctly.
func TestBlockSize(t *testing.T) {
tmpdir := t.TempDir()


@ -86,8 +86,8 @@ func (b *bstream) writeBit(bit bit) {
func (b *bstream) writeByte(byt byte) {
if b.count == 0 {
b.stream = append(b.stream, 0)
b.count = 8
b.stream = append(b.stream, byt)
return
}
i := len(b.stream) - 1
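A standalone sketch of the fixed write path (`bstreamSketch` is a hypothetical stand-in for the real `bstream` type). The one-byte saving here is also why the XOR chunk fixtures and checksums in the remote-read and block tests earlier in this diff each lost a trailing `\000`.

```go
type bstreamSketch struct {
	stream []byte
	count  uint8 // free bits in the last byte of stream
}

func (b *bstreamSketch) writeByte(byt byte) {
	if b.count == 0 {
		// New behavior: the stream is byte-aligned, so append the byte
		// directly. The old code appended a zero placeholder, set count = 8,
		// and fell through below, which left an extra all-zero trailing byte.
		b.stream = append(b.stream, byt)
		return
	}
	i := len(b.stream) - 1
	// Fill the free bits of the current last byte with the high bits of byt,
	b.stream[i] |= byt >> (8 - b.count)
	// then spill the remaining low bits into a fresh byte; count is unchanged.
	b.stream = append(b.stream, byt<<b.count)
}
```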


@ -173,6 +173,12 @@ type Options struct {
// EnableNativeHistograms enables the ingestion of native histograms.
EnableNativeHistograms bool
// EnableOOONativeHistograms enables the ingestion of OOO native histograms.
// It will only take effect if EnableNativeHistograms is set to true and the
// OutOfOrderTimeWindow is > 0. This flag will be removed after testing of
// OOO Native Histogram ingestion is complete.
EnableOOONativeHistograms bool
// OutOfOrderTimeWindow specifies how much out of order is allowed, if any.
// This can change during run-time, so this value from here should only be used
// while initialising.
@ -948,6 +954,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
headOpts.MaxExemplars.Store(opts.MaxExemplars)
headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown
headOpts.EnableNativeHistograms.Store(opts.EnableNativeHistograms)
headOpts.EnableOOONativeHistograms.Store(opts.EnableOOONativeHistograms)
headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow)
headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax)
headOpts.EnableSharding = opts.EnableSharding
@ -1172,6 +1179,16 @@ func (db *DB) DisableNativeHistograms() {
db.head.DisableNativeHistograms()
}
// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms.
func (db *DB) EnableOOONativeHistograms() {
db.head.EnableOOONativeHistograms()
}
// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms.
func (db *DB) DisableOOONativeHistograms() {
db.head.DisableOOONativeHistograms()
}
// dbAppender wraps the DB's head appender and triggers compactions on commit
// if necessary.
type dbAppender struct {
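A short usage sketch of the new runtime toggles, which mirror the existing `EnableNativeHistograms`/`DisableNativeHistograms` pair. Assumes `db` is a `*tsdb.DB` opened with a non-zero `OutOfOrderTimeWindow`; without that window (and native histograms enabled), the toggle has no effect per the option docs above.

```go
db.EnableNativeHistograms()
db.EnableOOONativeHistograms() // accept OOO histograms within the window
// ...
db.DisableOOONativeHistograms() // back to rejecting OOO histograms
```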

File diff suppressed because it is too large.


@ -150,6 +150,11 @@ type HeadOptions struct {
// EnableNativeHistograms enables the ingestion of native histograms.
EnableNativeHistograms atomic.Bool
// EnableOOONativeHistograms enables the ingestion of OOO native histograms.
// It will only take effect if EnableNativeHistograms is set to true and the
// OutOfOrderTimeWindow is > 0
EnableOOONativeHistograms atomic.Bool
// EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample.
// See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md
EnableCreatedTimestampZeroIngestion bool
@ -1018,6 +1023,16 @@ func (h *Head) DisableNativeHistograms() {
h.opts.EnableNativeHistograms.Store(false)
}
// EnableOOONativeHistograms enables the ingestion of out-of-order native histograms.
func (h *Head) EnableOOONativeHistograms() {
h.opts.EnableOOONativeHistograms.Store(true)
}
// DisableOOONativeHistograms disables the ingestion of out-of-order native histograms.
func (h *Head) DisableOOONativeHistograms() {
h.opts.EnableOOONativeHistograms.Store(false)
}
// PostingsCardinalityStats returns highest cardinality stats by label and value names.
func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats {
cacheKey := statsByLabelName + ";" + strconv.Itoa(limit)


@ -321,8 +321,8 @@ type headAppender struct {
}
func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
// For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append.
// If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work.
// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise a full check will be done later to decide if the sample is in-order or out-of-order.
if a.oooTimeWindow == 0 && t < a.minValidTime {
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Inc()
return 0, storage.ErrOutOfBounds
@ -493,46 +493,94 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi
return false, headMaxt - t, storage.ErrOutOfOrderSample
}
// appendableHistogram checks whether the given histogram is valid for appending to the series.
func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram) error {
if s.headChunks == nil {
return nil
// appendableHistogram checks whether the given histogram sample is valid for appending to the series:
// (false, nil) means it can be appended in order, (true, nil) means it belongs to the out-of-order
// chunk, and a non-nil error means the sample cannot be handled.
func (s *memSeries) appendableHistogram(t int64, h *histogram.Histogram, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) (isOOO bool, oooDelta int64, err error) {
// Check if we can append in the in-order chunk.
if t >= minValidTime {
if s.headChunks == nil {
// The series has no sample and was freshly created.
return false, 0, nil
}
msMaxt := s.maxTime()
if t > msMaxt {
return false, 0, nil
}
if t == msMaxt {
// We are allowing exact duplicates as we can encounter them in valid cases
// like federation and erroring out at that time would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if !h.Equals(s.lastHistogramValue) {
return false, 0, storage.ErrDuplicateSampleForTimestamp
}
// The sample is identical (timestamp and value) to the latest in-order sample.
return false, 0, nil
}
}
if t > s.headChunks.maxTime {
return nil
}
if t < s.headChunks.maxTime {
return storage.ErrOutOfOrderSample
// The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk.
if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
if !oooHistogramsEnabled {
return true, headMaxt - t, storage.ErrOOONativeHistogramsDisabled
}
return true, headMaxt - t, nil
}
// We are allowing exact duplicates as we can encounter them in valid cases
// like federation and erroring out at that time would be extremely noisy.
if !h.Equals(s.lastHistogramValue) {
return storage.ErrDuplicateSampleForTimestamp
// The sample can go in neither the in-order chunk nor the out-of-order chunk.
if oooTimeWindow > 0 {
return true, headMaxt - t, storage.ErrTooOldSample
}
return nil
if t < minValidTime {
return false, headMaxt - t, storage.ErrOutOfBounds
}
return false, headMaxt - t, storage.ErrOutOfOrderSample
}
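
To make the branching above easier to follow, here is a standalone, deliberately simplified model of the decision tree. It omits the duplicate-detection path and conflates the series' own max time with headMaxt; the function name, timestamps, and returned strings are made up stand-ins for the real storage errors:

```go
package main

import "fmt"

// classify is a hypothetical, simplified restatement of appendableHistogram's
// contract. All values are milliseconds.
func classify(t, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) string {
	if t >= minValidTime && t > headMaxt {
		return "in-order append"
	}
	if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
		if !oooHistogramsEnabled {
			return "ErrOOONativeHistogramsDisabled"
		}
		return "out-of-order append"
	}
	if oooTimeWindow > 0 {
		return "ErrTooOldSample"
	}
	if t < minValidTime {
		return "ErrOutOfBounds"
	}
	return "ErrOutOfOrderSample"
}

func main() {
	fmt.Println(classify(1500, 1000, 500, 200, true)) // in-order append
	fmt.Println(classify(900, 1000, 500, 200, true))  // out-of-order append (delta would be 100ms)
	fmt.Println(classify(900, 1000, 500, 200, false)) // ErrOOONativeHistogramsDisabled
	fmt.Println(classify(700, 1000, 500, 200, true))  // ErrTooOldSample
	fmt.Println(classify(400, 1000, 500, 0, true))    // ErrOutOfBounds
}
```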
// appendableFloatHistogram checks whether the given float histogram is valid for appending to the series.
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram) error {
if s.headChunks == nil {
return nil
// appendableFloatHistogram checks whether the given float histogram sample is valid for appending to the series (if so, we return false and no error).
// The sample belongs to the out-of-order chunk if we return true and no error.
// An error signifies the sample cannot be handled.
func (s *memSeries) appendableFloatHistogram(t int64, fh *histogram.FloatHistogram, headMaxt, minValidTime, oooTimeWindow int64, oooHistogramsEnabled bool) (isOOO bool, oooDelta int64, err error) {
// Check if we can append in the in-order chunk.
if t >= minValidTime {
if s.headChunks == nil {
// The series has no sample and was freshly created.
return false, 0, nil
}
msMaxt := s.maxTime()
if t > msMaxt {
return false, 0, nil
}
if t == msMaxt {
// We are allowing exact duplicates as we can encounter them in valid cases
// like federation and erroring out at that time would be extremely noisy.
// This only checks against the latest in-order sample.
// The OOO headchunk has its own method to detect these duplicates.
if !fh.Equals(s.lastFloatHistogramValue) {
return false, 0, storage.ErrDuplicateSampleForTimestamp
}
// Sample is identical (ts + value) to the most current (highest ts) in-order sample.
return false, 0, nil
}
}
if t > s.headChunks.maxTime {
return nil
}
if t < s.headChunks.maxTime {
return storage.ErrOutOfOrderSample
// The sample cannot go in the in-order chunk. Check if it can go in the out-of-order chunk.
if oooTimeWindow > 0 && t >= headMaxt-oooTimeWindow {
if !oooHistogramsEnabled {
return true, headMaxt - t, storage.ErrOOONativeHistogramsDisabled
}
return true, headMaxt - t, nil
}
// We are allowing exact duplicates as we can encounter them in valid cases
// like federation and erroring out at that time would be extremely noisy.
if !fh.Equals(s.lastFloatHistogramValue) {
return storage.ErrDuplicateSampleForTimestamp
// The sample can go in neither the in-order chunk nor the out-of-order chunk.
if oooTimeWindow > 0 {
return true, headMaxt - t, storage.ErrTooOldSample
}
return nil
if t < minValidTime {
return false, headMaxt - t, storage.ErrOutOfBounds
}
return false, headMaxt - t, storage.ErrOutOfOrderSample
}
// AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't
@ -577,7 +625,9 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
return 0, storage.ErrNativeHistogramsDisabled
}
if t < a.minValidTime {
// Fail fast if OOO is disabled and the sample is out of bounds.
// Otherwise a full check will be done later to decide if the sample is in-order or out-of-order.
if (a.oooTimeWindow == 0 || !a.head.opts.EnableOOONativeHistograms.Load()) && t < a.minValidTime {
a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
return 0, storage.ErrOutOfBounds
}
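
The guard above only fails fast when no OOO path could possibly accept the sample; otherwise the decision is deferred. A hedged standalone restatement (the function name is made up, and t < a.minValidTime is assumed):

```go
package main

import "fmt"

// failFast mirrors the guard above for a histogram sample with t < minValidTime:
// reject immediately only when the OOO path is ruled out entirely.
func failFast(oooTimeWindowMs int64, oooNativeHistogramsEnabled bool) bool {
	return oooTimeWindowMs == 0 || !oooNativeHistogramsEnabled
}

func main() {
	fmt.Println(failFast(0, true))        // true: no OOO window, reject as out of bounds
	fmt.Println(failFast(600_000, false)) // true: window set, but OOO histograms disabled
	fmt.Println(failFast(600_000, true))  // false: let appendableHistogram decide
}
```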
@ -629,15 +679,27 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
switch {
case h != nil:
s.Lock()
if err := s.appendableHistogram(t, h); err != nil {
s.Unlock()
if errors.Is(err, storage.ErrOutOfOrderSample) {
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableHistogram(t, h, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load())
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderSample):
fallthrough
case errors.Is(err, storage.ErrOOONativeHistogramsDisabled):
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
case errors.Is(err, storage.ErrTooOldSample):
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
}
return 0, err
}
s.pendingCommit = true
s.Unlock()
a.histograms = append(a.histograms, record.RefHistogramSample{
Ref: s.ref,
T: t,
@ -646,15 +708,27 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
a.histogramSeries = append(a.histogramSeries, s)
case fh != nil:
s.Lock()
if err := s.appendableFloatHistogram(t, fh); err != nil {
s.Unlock()
if errors.Is(err, storage.ErrOutOfOrderSample) {
// TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise
// to skip that sample from the WAL and write only in the WBL.
_, delta, err := s.appendableFloatHistogram(t, fh, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load())
if err == nil {
s.pendingCommit = true
}
s.Unlock()
if delta > 0 {
a.head.metrics.oooHistogram.Observe(float64(delta) / 1000)
}
if err != nil {
switch {
case errors.Is(err, storage.ErrOutOfOrderSample):
fallthrough
case errors.Is(err, storage.ErrOOONativeHistogramsDisabled):
a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
case errors.Is(err, storage.ErrTooOldSample):
a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeHistogram).Inc()
}
return 0, err
}
s.pendingCommit = true
s.Unlock()
a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref,
T: t,
@ -841,20 +915,24 @@ func (a *headAppender) Commit() (err error) {
floatsAppended = len(a.samples)
histogramsAppended = len(a.histograms) + len(a.floatHistograms)
// number of samples out of order but accepted: with ooo enabled and within time window
oooFloatsAccepted int
oooFloatsAccepted int
oooHistogramAccepted int
// number of samples rejected due to: out of order but OOO support disabled.
floatOOORejected int
histoOOORejected int
// number of samples rejected due to: out of order but too old (OOO support enabled, but outside time window)
floatTooOldRejected int
histoTooOldRejected int
// number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled)
floatOOBRejected int
floatOOBRejected int
histoOOBRejected int
inOrderMint int64 = math.MaxInt64
inOrderMaxt int64 = math.MinInt64
oooMinT int64 = math.MaxInt64
oooMaxT int64 = math.MinInt64
wblSamples []record.RefSample
wblHistograms []record.RefHistogramSample
wblFloatHistograms []record.RefFloatHistogramSample
oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef
oooMmapMarkersCount int
oooRecords [][]byte
@ -876,6 +954,8 @@ func (a *headAppender) Commit() (err error) {
if a.head.wbl == nil {
// WBL is not enabled. So no need to collect.
wblSamples = nil
wblHistograms = nil
wblFloatHistograms = nil
oooMmapMarkers = nil
oooMmapMarkersCount = 0
return
@ -903,8 +983,18 @@ func (a *headAppender) Commit() (err error) {
r := enc.Samples(wblSamples, a.head.getBytesBuffer())
oooRecords = append(oooRecords, r)
}
if len(wblHistograms) > 0 {
r := enc.HistogramSamples(wblHistograms, a.head.getBytesBuffer())
oooRecords = append(oooRecords, r)
}
if len(wblFloatHistograms) > 0 {
r := enc.FloatHistogramSamples(wblFloatHistograms, a.head.getBytesBuffer())
oooRecords = append(oooRecords, r)
}
wblSamples = nil
wblHistograms = nil
wblFloatHistograms = nil
oooMmapMarkers = nil
}
for i, s := range a.samples {
@ -1006,51 +1096,193 @@ func (a *headAppender) Commit() (err error) {
for i, s := range a.histograms {
series = a.histogramSeries[i]
series.Lock()
ok, chunkCreated := series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts)
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
if ok {
if s.T < inOrderMint {
inOrderMint = s.T
}
if s.T > inOrderMaxt {
inOrderMaxt = s.T
}
} else {
oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load())
switch {
case err == nil:
// Do nothing.
case errors.Is(err, storage.ErrOutOfOrderSample):
histogramsAppended--
histoOOORejected++
case errors.Is(err, storage.ErrOutOfBounds):
histogramsAppended--
histoOOBRejected++
case errors.Is(err, storage.ErrTooOldSample):
histogramsAppended--
histoTooOldRejected++
default:
histogramsAppended--
}
var ok, chunkCreated bool
switch {
case err != nil:
// Do nothing here.
case oooSample:
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
var mmapRefs []chunks.ChunkDiskMapperRef
ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
if chunkCreated {
r, ok := oooMmapMarkers[series.ref]
if !ok || r != nil {
// !ok means there are no markers collected for these samples yet. So we first flush the samples
// before setting this m-map marker.
// r != nil means we have already m-mapped a chunk for this series in the same Commit().
// Hence, before we m-map again, we should add the samples and m-map markers
// seen till now to the WBL records.
collectOOORecords()
}
if oooMmapMarkers == nil {
oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
}
if len(mmapRefs) > 0 {
oooMmapMarkers[series.ref] = mmapRefs
oooMmapMarkersCount += len(mmapRefs)
} else {
// No chunk was written to disk, so we need to set an initial marker for this series.
oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
oooMmapMarkersCount++
}
}
if ok {
wblHistograms = append(wblHistograms, s)
if s.T < oooMinT {
oooMinT = s.T
}
if s.T > oooMaxT {
oooMaxT = s.T
}
oooHistogramAccepted++
} else {
// Sample is an exact duplicate of the last sample.
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk,
// not with samples in already flushed OOO chunks.
// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
histogramsAppended--
}
default:
ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts)
if ok {
if s.T < inOrderMint {
inOrderMint = s.T
}
if s.T > inOrderMaxt {
inOrderMaxt = s.T
}
} else {
histogramsAppended--
histoOOORejected++
}
}
if chunkCreated {
a.head.metrics.chunks.Inc()
a.head.metrics.chunksCreated.Inc()
}
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
}
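
As a reading aid, a standalone sketch of how Commit() above classifies each histogram append outcome into a counter. The error values are stand-ins for the storage package's sentinels, and the returned strings name the local counters, not the exported metric names:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for storage.ErrOutOfOrderSample etc.; only the mapping matters.
var (
	errOutOfOrderSample = errors.New("out of order sample")
	errTooOldSample     = errors.New("too old sample")
	errOutOfBounds      = errors.New("out of bounds")
)

// counterFor names the local Commit() counter a histogram append outcome bumps.
func counterFor(err error) string {
	switch {
	case err == nil:
		return "appended (or oooHistogramAccepted if it went to the OOO chunk)"
	case errors.Is(err, errOutOfOrderSample):
		return "histoOOORejected"
	case errors.Is(err, errOutOfBounds):
		return "histoOOBRejected"
	case errors.Is(err, errTooOldSample):
		return "histoTooOldRejected"
	default:
		return "rejected without a dedicated counter"
	}
}

func main() {
	fmt.Println(counterFor(errTooOldSample)) // histoTooOldRejected
	fmt.Println(counterFor(nil))             // appended (or oooHistogramAccepted ...)
}
```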
for i, s := range a.floatHistograms {
series = a.floatHistogramSeries[i]
series.Lock()
ok, chunkCreated := series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts)
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
if ok {
if s.T < inOrderMint {
inOrderMint = s.T
}
if s.T > inOrderMaxt {
inOrderMaxt = s.T
}
} else {
oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load())
switch {
case err == nil:
// Do nothing.
case errors.Is(err, storage.ErrOutOfOrderSample):
histogramsAppended--
histoOOORejected++
case errors.Is(err, storage.ErrOutOfBounds):
histogramsAppended--
histoOOBRejected++
case errors.Is(err, storage.ErrTooOldSample):
histogramsAppended--
histoTooOldRejected++
default:
histogramsAppended--
}
var ok, chunkCreated bool
switch {
case err != nil:
// Do nothing here.
case oooSample:
// Sample is OOO and OOO handling is enabled
// and the delta is within the OOO tolerance.
var mmapRefs []chunks.ChunkDiskMapperRef
ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, oooCapMax, a.head.logger)
if chunkCreated {
r, ok := oooMmapMarkers[series.ref]
if !ok || r != nil {
// !ok means there are no markers collected for these samples yet. So we first flush the samples
// before setting this m-map marker.
// r != nil means we have already m-mapped a chunk for this series in the same Commit().
// Hence, before we m-map again, we should add the samples and m-map markers
// seen till now to the WBL records.
collectOOORecords()
}
if oooMmapMarkers == nil {
oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef)
}
if len(mmapRefs) > 0 {
oooMmapMarkers[series.ref] = mmapRefs
oooMmapMarkersCount += len(mmapRefs)
} else {
// No chunk was written to disk, so we need to set an initial marker for this series.
oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0}
oooMmapMarkersCount++
}
}
if ok {
wblFloatHistograms = append(wblFloatHistograms, s)
if s.T < oooMinT {
oooMinT = s.T
}
if s.T > oooMaxT {
oooMaxT = s.T
}
oooHistogramAccepted++
} else {
// Sample is an exact duplicate of the last sample.
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk,
// not with samples in already flushed OOO chunks.
// TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305.
histogramsAppended--
}
default:
ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts)
if ok {
if s.T < inOrderMint {
inOrderMint = s.T
}
if s.T > inOrderMaxt {
inOrderMaxt = s.T
}
} else {
histogramsAppended--
histoOOORejected++
}
}
if chunkCreated {
a.head.metrics.chunks.Inc()
a.head.metrics.chunksCreated.Inc()
}
series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow)
series.pendingCommit = false
series.Unlock()
}
for i, m := range a.metadata {
@ -1067,6 +1299,7 @@ func (a *headAppender) Commit() (err error) {
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended))
a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted))
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(oooHistogramAccepted))
a.head.updateMinMaxTime(inOrderMint, inOrderMaxt)
a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT)
@ -1469,13 +1702,13 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap
handleChunkWriteError(err)
return nil
}
chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1)
chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, len(chks))
for _, memchunk := range chks {
if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) {
level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String())
break
}
chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
chunkRef := chunkDiskMapper.WriteChunk(s.ref, memchunk.minTime, memchunk.maxTime, memchunk.chunk, true, handleChunkWriteError)
chunkRefs = append(chunkRefs, chunkRef)
s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{
ref: chunkRef,

View file

@ -23,6 +23,7 @@ import (
"path"
"path/filepath"
"reflect"
"slices"
"sort"
"strconv"
"strings"
@ -2101,6 +2102,36 @@ func TestHead_LogRollback(t *testing.T) {
}
}
func TestHead_ReturnsSortedLabelValues(t *testing.T) {
h, _ := newTestHead(t, 1000, wlog.CompressionNone, false)
defer func() {
require.NoError(t, h.Close())
}()
h.initTime(0)
app := h.appender()
for i := 100; i > 0; i-- {
for j := 0; j < 10; j++ {
lset := labels.FromStrings(
"__name__", fmt.Sprintf("metric_%d", i),
"label", fmt.Sprintf("value_%d", j),
)
_, err := app.Append(0, lset, 2100, 1)
require.NoError(t, err)
}
}
q, err := NewBlockQuerier(h, 1500, 2500)
require.NoError(t, err)
res, _, err := q.LabelValues(context.Background(), "__name__", nil)
require.NoError(t, err)
require.True(t, slices.IsSorted(res))
require.NoError(t, q.Close())
}
// TestWalRepair_DecodingError ensures that a repair is run for an error
// when decoding a record.
func TestWalRepair_DecodingError(t *testing.T) {
@ -2692,15 +2723,32 @@ func TestIsolationWithoutAdd(t *testing.T) {
func TestOutOfOrderSamplesMetric(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
testOutOfOrderSamplesMetric(t, scenario)
options := DefaultOptions()
options.EnableNativeHistograms = true
options.EnableOOONativeHistograms = true
testOutOfOrderSamplesMetric(t, scenario, options, storage.ErrOutOfOrderSample)
})
}
}
func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {
dir := t.TempDir()
func TestOutOfOrderSamplesMetricNativeHistogramOOODisabled(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
if scenario.sampleType != "histogram" {
continue
}
t.Run(name, func(t *testing.T) {
options := DefaultOptions()
options.OutOfOrderTimeWindow = (1000 * time.Minute).Milliseconds()
options.EnableNativeHistograms = true
options.EnableOOONativeHistograms = false
testOutOfOrderSamplesMetric(t, scenario, options, storage.ErrOOONativeHistogramsDisabled)
})
}
}
db, err := Open(dir, nil, nil, DefaultOptions(), nil)
func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario, options *Options, expectOutOfOrderError error) {
dir := t.TempDir()
db, err := Open(dir, nil, nil, options, nil)
require.NoError(t, err)
defer func() {
require.NoError(t, db.Close())
@ -2724,15 +2772,15 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {
require.Equal(t, 0.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
app = db.Appender(ctx)
_, err = appendSample(app, 2)
require.Equal(t, storage.ErrOutOfOrderSample, err)
require.Equal(t, expectOutOfOrderError, err)
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
_, err = appendSample(app, 3)
require.Equal(t, storage.ErrOutOfOrderSample, err)
require.Equal(t, expectOutOfOrderError, err)
require.Equal(t, 2.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
_, err = appendSample(app, 4)
require.Equal(t, storage.ErrOutOfOrderSample, err)
require.Equal(t, expectOutOfOrderError, err)
require.Equal(t, 3.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
require.NoError(t, app.Commit())
@ -2767,15 +2815,15 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) {
// Test out of order metric.
app = db.Appender(ctx)
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+2)
require.Equal(t, storage.ErrOutOfOrderSample, err)
require.Equal(t, expectOutOfOrderError, err)
require.Equal(t, 4.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+3)
require.Equal(t, storage.ErrOutOfOrderSample, err)
require.Equal(t, expectOutOfOrderError, err)
require.Equal(t, 5.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
_, err = appendSample(app, db.head.minValidTime.Load()+DefaultBlockDuration+4)
require.Equal(t, storage.ErrOutOfOrderSample, err)
require.Equal(t, expectOutOfOrderError, err)
require.Equal(t, 6.0, prom_testutil.ToFloat64(db.head.metrics.outOfOrderSamples.WithLabelValues(scenario.sampleType)))
require.NoError(t, app.Commit())
}
@ -4626,10 +4674,172 @@ func TestHistogramCounterResetHeader(t *testing.T) {
}
}
func TestOOOHistogramCounterResetHeaders(t *testing.T) {
for _, floatHisto := range []bool{true, false} {
t.Run(fmt.Sprintf("floatHistogram=%t", floatHisto), func(t *testing.T) {
l := labels.FromStrings("a", "b")
head, _ := newTestHead(t, 1000, wlog.CompressionNone, true)
head.opts.OutOfOrderCapMax.Store(5)
head.opts.EnableOOONativeHistograms.Store(true)
t.Cleanup(func() {
require.NoError(t, head.Close())
})
require.NoError(t, head.Init(0))
appendHistogram := func(ts int64, h *histogram.Histogram) {
app := head.Appender(context.Background())
var err error
if floatHisto {
_, err = app.AppendHistogram(0, l, ts, nil, h.ToFloat(nil))
} else {
_, err = app.AppendHistogram(0, l, ts, h.Copy(), nil)
}
require.NoError(t, err)
require.NoError(t, app.Commit())
}
type expOOOMmappedChunks struct {
header chunkenc.CounterResetHeader
mint, maxt int64
numSamples uint16
}
var expChunks []expOOOMmappedChunks
checkOOOExpCounterResetHeader := func(newChunks ...expOOOMmappedChunks) {
expChunks = append(expChunks, newChunks...)
ms, _, err := head.getOrCreate(l.Hash(), l)
require.NoError(t, err)
require.Len(t, ms.ooo.oooMmappedChunks, len(expChunks))
for i, mmapChunk := range ms.ooo.oooMmappedChunks {
chk, err := head.chunkDiskMapper.Chunk(mmapChunk.ref)
require.NoError(t, err)
if floatHisto {
require.Equal(t, expChunks[i].header, chk.(*chunkenc.FloatHistogramChunk).GetCounterResetHeader())
} else {
require.Equal(t, expChunks[i].header, chk.(*chunkenc.HistogramChunk).GetCounterResetHeader())
}
require.Equal(t, expChunks[i].mint, mmapChunk.minTime)
require.Equal(t, expChunks[i].maxt, mmapChunk.maxTime)
require.Equal(t, expChunks[i].numSamples, mmapChunk.numSamples)
}
}
// Append an in-order histogram, so the rest of the samples can be detected as OOO.
appendHistogram(1000, tsdbutil.GenerateTestHistogram(1000))
// OOO histogram
for i := 1; i <= 5; i++ {
appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+i))
}
// Nothing mmapped yet.
checkOOOExpCounterResetHeader()
// 6th observation (which triggers a head chunk mmapping).
appendHistogram(int64(112), tsdbutil.GenerateTestHistogram(1002))
// One mmapped chunk with (ts, val) [(101, 1001), (102, 1002), (103, 1003), (104, 1004), (105, 1005)].
checkOOOExpCounterResetHeader(expOOOMmappedChunks{
header: chunkenc.UnknownCounterReset,
mint: 101,
maxt: 105,
numSamples: 5,
})
// Add more samples; there's a counter reset at ts 122.
appendHistogram(int64(110), tsdbutil.GenerateTestHistogram(1001))
appendHistogram(int64(124), tsdbutil.GenerateTestHistogram(904))
appendHistogram(int64(123), tsdbutil.GenerateTestHistogram(903))
appendHistogram(int64(122), tsdbutil.GenerateTestHistogram(902))
// New samples not mmapped yet.
checkOOOExpCounterResetHeader()
// 11th observation (which triggers another head chunk mmapping).
appendHistogram(int64(200), tsdbutil.GenerateTestHistogram(2000))
// Two new mmapped chunks [(110, 1001), (112, 1002)], [(122, 902), (123, 903), (124, 904)].
checkOOOExpCounterResetHeader(
expOOOMmappedChunks{
header: chunkenc.UnknownCounterReset,
mint: 110,
maxt: 112,
numSamples: 2,
},
expOOOMmappedChunks{
header: chunkenc.CounterReset,
mint: 122,
maxt: 124,
numSamples: 3,
},
)
// Count is lower than previous sample at ts 200, and NotCounterReset is always ignored on append.
appendHistogram(int64(205), tsdbutil.SetHistogramNotCounterReset(tsdbutil.GenerateTestHistogram(1000)))
appendHistogram(int64(210), tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(2010)))
appendHistogram(int64(220), tsdbutil.GenerateTestHistogram(2020))
appendHistogram(int64(215), tsdbutil.GenerateTestHistogram(2005))
// 16th observation (which triggers another head chunk mmapping).
appendHistogram(int64(350), tsdbutil.GenerateTestHistogram(4000))
// Four new mmapped chunks: [(200, 2000)], [(205, 1000)], [(210, 2010)], [(215, 2005), (220, 2020)]
checkOOOExpCounterResetHeader(
expOOOMmappedChunks{
header: chunkenc.UnknownCounterReset,
mint: 200,
maxt: 200,
numSamples: 1,
},
expOOOMmappedChunks{
header: chunkenc.CounterReset,
mint: 205,
maxt: 205,
numSamples: 1,
},
expOOOMmappedChunks{
header: chunkenc.CounterReset,
mint: 210,
maxt: 210,
numSamples: 1,
},
expOOOMmappedChunks{
header: chunkenc.CounterReset,
mint: 215,
maxt: 220,
numSamples: 2,
},
)
// Adding five more samples (21 in total), so another mmapped chunk is created.
appendHistogram(300, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(3000)))
for i := 1; i <= 4; i++ {
appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+i))
}
// One mmapped chunk with (ts, val) [(300, 3000), (301, 3001), (302, 3002), (303, 3003), (350, 4000)].
checkOOOExpCounterResetHeader(expOOOMmappedChunks{
header: chunkenc.CounterReset,
mint: 300,
maxt: 350,
numSamples: 5,
})
})
}
}
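
For orientation, a heavily simplified model of the chunk-cutting rule this test exercises: within one OOO head chunk's sorted samples, a new chunk is cut and stamped CounterReset when the count drops between consecutive samples or an explicit reset hint is set, while the first chunk cut from an OOO head chunk starts as UnknownCounterReset because its relation to earlier data is unknowable. The function below is an illustrative assumption, not the real detector, which also compares per-bucket counts, zero counts, and schemas:

```go
package main

import "fmt"

// cutsCounterResetChunk is an illustrative simplification of the real appender logic.
func cutsCounterResetChunk(prevCount, count float64, explicitResetHint bool) bool {
	return explicitResetHint || count < prevCount
}

func main() {
	fmt.Println(cutsCounterResetChunk(1002, 902, false))  // true: count dropped (ts 112 -> 122 above)
	fmt.Println(cutsCounterResetChunk(1000, 2010, true))  // true: explicit reset hint (ts 210 above)
	fmt.Println(cutsCounterResetChunk(1001, 1002, false)) // false: samples stay in the same chunk
}
```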
func TestAppendingDifferentEncodingToSameSeries(t *testing.T) {
dir := t.TempDir()
opts := DefaultOptions()
opts.EnableNativeHistograms = true
opts.EnableOOONativeHistograms = true
db, err := Open(dir, nil, nil, opts, nil)
require.NoError(t, err)
t.Cleanup(func() {
@ -4900,6 +5110,8 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
opts.ChunkRange = 1000
opts.ChunkDirRoot = dir
opts.OutOfOrderTimeWindow.Store(30 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)
opts.EnableOOONativeHistograms.Store(true)
h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)
@ -4909,13 +5121,12 @@ func testWBLReplay(t *testing.T, scenario sampleTypeScenario) {
l := labels.FromStrings("foo", "bar")
appendSample := func(mins int64, val float64, isOOO bool) {
app := h.Appender(context.Background())
ts, v := mins*time.Minute.Milliseconds(), val
_, err := app.Append(0, l, ts, v)
_, s, err := scenario.appendFunc(app, l, mins*time.Minute.Milliseconds(), mins)
require.NoError(t, err)
require.NoError(t, app.Commit())
if isOOO {
expOOOSamples = append(expOOOSamples, sample{t: ts, f: v})
expOOOSamples = append(expOOOSamples, s)
}
}
@ -4994,6 +5205,8 @@ func testOOOMmapReplay(t *testing.T, scenario sampleTypeScenario) {
opts.ChunkDirRoot = dir
opts.OutOfOrderCapMax.Store(30)
opts.OutOfOrderTimeWindow.Store(1000 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)
opts.EnableOOONativeHistograms.Store(true)
h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)
@ -5295,6 +5508,8 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap
opts.ChunkDirRoot = dir
opts.OutOfOrderCapMax.Store(30)
opts.OutOfOrderTimeWindow.Store(120 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)
opts.EnableOOONativeHistograms.Store(true)
h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)
@ -5368,7 +5583,9 @@ func testOOOAppendWithNoSeries(t *testing.T, appendFunc func(appender storage.Ap
func TestHeadMinOOOTimeUpdate(t *testing.T) {
for name, scenario := range sampleTypeScenarios {
t.Run(name, func(t *testing.T) {
testHeadMinOOOTimeUpdate(t, scenario)
if scenario.sampleType == sampleMetricTypeFloat {
testHeadMinOOOTimeUpdate(t, scenario)
}
})
}
}
@ -5383,6 +5600,8 @@ func testHeadMinOOOTimeUpdate(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultHeadOptions()
opts.ChunkDirRoot = dir
opts.OutOfOrderTimeWindow.Store(10 * time.Minute.Milliseconds())
opts.EnableNativeHistograms.Store(true)
opts.EnableOOONativeHistograms.Store(true)
h, err := NewHead(nil, nil, wal, oooWlog, opts, nil)
require.NoError(t, err)

View file

@ -646,9 +646,9 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp
}
func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) {
// Track number of samples, m-map markers, that referenced a series we don't know about
// Track number of samples, histogram samples, m-map markers, that referenced a series we don't know about
// for error reporting.
var unknownRefs, mmapMarkerUnknownRefs atomic.Uint64
var unknownRefs, unknownHistogramRefs, mmapMarkerUnknownRefs atomic.Uint64
lastSeq, lastOff := lastMmapRef.Unpack()
// Start workers that each process samples for a partition of the series ID space.
@ -657,8 +657,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
concurrency = h.opts.WALReplayConcurrency
processors = make([]wblSubsetProcessor, concurrency)
dec = record.NewDecoder(syms)
shards = make([][]record.RefSample, concurrency)
dec record.Decoder
shards = make([][]record.RefSample, concurrency)
histogramShards = make([][]histogramRecord, concurrency)
decodedCh = make(chan interface{}, 10)
decodeErr error
@ -672,6 +673,16 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
return []record.RefMmapMarker{}
},
}
histogramSamplesPool = sync.Pool{
New: func() interface{} {
return []record.RefHistogramSample{}
},
}
floatHistogramSamplesPool = sync.Pool{
New: func() interface{} {
return []record.RefFloatHistogramSample{}
},
}
)
defer func() {
@ -692,8 +703,9 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
processors[i].setup()
go func(wp *wblSubsetProcessor) {
unknown := wp.processWBLSamples(h)
unknown, unknownHistograms := wp.processWBLSamples(h)
unknownRefs.Add(unknown)
unknownHistogramRefs.Add(unknownHistograms)
wg.Done()
}(&processors[i])
}
@ -727,6 +739,30 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
return
}
decodedCh <- markers
case record.HistogramSamples:
hists := histogramSamplesPool.Get().([]record.RefHistogramSample)[:0]
hists, err = dec.HistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- hists
case record.FloatHistogramSamples:
hists := floatHistogramSamplesPool.Get().([]record.RefFloatHistogramSample)[:0]
hists, err = dec.FloatHistogramSamples(rec, hists)
if err != nil {
decodeErr = &wlog.CorruptionErr{
Err: fmt.Errorf("decode float histograms: %w", err),
Segment: r.Segment(),
Offset: r.Offset(),
}
return
}
decodedCh <- hists
default:
// Noop.
}
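
A self-contained sketch of the pooling pattern used for the decoded histogram records above: slices come out of a sync.Pool, are truncated to zero length before decoding into them, and go back to the pool once dispatched. The //nolint mirrors the same staticcheck suppression as in the surrounding code:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	pool := sync.Pool{New: func() interface{} { return []int{} }}

	recs := pool.Get().([]int)[:0] // reuse any returned buffer, keep its capacity
	recs = append(recs, 1, 2, 3)   // "decode" records into it
	fmt.Println(recs)              // [1 2 3]

	pool.Put(recs) //nolint:staticcheck // slice headers are cheap to pool by value
}
```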
@ -791,6 +827,70 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
idx := uint64(ms.ref) % uint64(concurrency)
processors[idx].input <- wblSubsetProcessorInputItem{mmappedSeries: ms}
}
case []record.RefHistogramSample:
samples := v
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in-flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, h: sam.H})
}
for i := 0; i < concurrency; i++ {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
histogramSamplesPool.Put(v) //nolint:staticcheck
case []record.RefFloatHistogramSample:
samples := v
// We split up the samples into chunks of 5000 samples or less.
// With O(300 * #cores) in-flight sample batches, large scrapes could otherwise
// cause thousands of very large in-flight buffers occupying large amounts
// of unused memory.
for len(samples) > 0 {
m := 5000
if len(samples) < m {
m = len(samples)
}
for i := 0; i < concurrency; i++ {
if histogramShards[i] == nil {
histogramShards[i] = processors[i].reuseHistogramBuf()
}
}
for _, sam := range samples[:m] {
if r, ok := multiRef[sam.Ref]; ok {
sam.Ref = r
}
mod := uint64(sam.Ref) % uint64(concurrency)
histogramShards[mod] = append(histogramShards[mod], histogramRecord{ref: sam.Ref, t: sam.T, fh: sam.FH})
}
for i := 0; i < concurrency; i++ {
if len(histogramShards[i]) > 0 {
processors[i].input <- wblSubsetProcessorInputItem{histogramSamples: histogramShards[i]}
histogramShards[i] = nil
}
}
samples = samples[m:]
}
floatHistogramSamplesPool.Put(v) //nolint:staticcheck
default:
panic(fmt.Errorf("unexpected decodedCh type: %T", d))
}
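
A minimal standalone sketch of the shard dispatch above: samples are routed to WBL replay workers by series ref modulo the worker count, so all samples of one series are always replayed by the same worker. The refs here are made up:

```go
package main

import "fmt"

func main() {
	const concurrency = 4
	refs := []uint64{10, 11, 14, 10, 27}
	shards := make([][]uint64, concurrency)
	for _, ref := range refs {
		mod := ref % concurrency
		shards[mod] = append(shards[mod], ref)
	}
	fmt.Println(shards) // [[] [] [10 14 10] [11 27]]
}
```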
@ -833,17 +933,20 @@ func (e errLoadWbl) Unwrap() error {
}
type wblSubsetProcessor struct {
input chan wblSubsetProcessorInputItem
output chan []record.RefSample
input chan wblSubsetProcessorInputItem
output chan []record.RefSample
histogramsOutput chan []histogramRecord
}
type wblSubsetProcessorInputItem struct {
mmappedSeries *memSeries
samples []record.RefSample
mmappedSeries *memSeries
samples []record.RefSample
histogramSamples []histogramRecord
}
func (wp *wblSubsetProcessor) setup() {
wp.output = make(chan []record.RefSample, 300)
wp.histogramsOutput = make(chan []histogramRecord, 300)
wp.input = make(chan wblSubsetProcessorInputItem, 300)
}
@ -851,6 +954,8 @@ func (wp *wblSubsetProcessor) closeAndDrain() {
close(wp.input)
for range wp.output {
}
for range wp.histogramsOutput {
}
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
@ -863,10 +968,21 @@ func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample {
return nil
}
// If there is a buffer in the output chan, return it for reuse, otherwise return nil.
func (wp *wblSubsetProcessor) reuseHistogramBuf() []histogramRecord {
select {
case buf := <-wp.histogramsOutput:
return buf[:0]
default:
}
return nil
}
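
The output channel doubles as a free-list here. A standalone sketch of the recycling pattern, under the same assumptions as above (the consumer sends finished buffers back, and the producer opportunistically reuses one, truncated to length zero, instead of allocating):

```go
package main

import "fmt"

func main() {
	output := make(chan []int, 1)

	reuseBuf := func() []int {
		select {
		case buf := <-output:
			return buf[:0] // keep capacity, drop contents
		default:
		}
		return nil
	}

	output <- make([]int, 0, 1024) // consumer hands a buffer back
	buf := reuseBuf()
	fmt.Println(len(buf), cap(buf)) // 0 1024
	fmt.Println(reuseBuf() == nil)  // true: nothing to reuse, caller allocates
}
```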
// processWBLSamples adds the samples it receives to the head and passes
// the buffer received to an output channel for reuse.
func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs, unknownHistogramRefs uint64) {
defer close(wp.output)
defer close(wp.histogramsOutput)
oooCapMax := h.opts.OutOfOrderCapMax.Load()
// We don't check for minValidTime for ooo samples.
@ -905,11 +1021,41 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) {
case wp.output <- in.samples:
default:
}
for _, s := range in.histogramSamples {
ms := h.series.getByID(s.ref)
if ms == nil {
unknownHistogramRefs++
continue
}
var chunkCreated bool
var ok bool
if s.h != nil {
ok, chunkCreated, _ = ms.insert(s.t, 0, s.h, nil, h.chunkDiskMapper, oooCapMax, h.logger)
} else {
ok, chunkCreated, _ = ms.insert(s.t, 0, nil, s.fh, h.chunkDiskMapper, oooCapMax, h.logger)
}
if chunkCreated {
h.metrics.chunksCreated.Inc()
h.metrics.chunks.Inc()
}
if ok {
if s.t > maxt {
maxt = s.t
}
if s.t < mint {
mint = s.t
}
}
}
select {
case wp.histogramsOutput <- in.histogramSamples:
default:
}
}
h.updateMinOOOMaxOOOTime(mint, maxt)
return unknownRefs
return unknownRefs, unknownHistogramRefs
}
const (

View file

@ -111,7 +111,7 @@ func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmap
return nil
}
for _, chk := range chks {
addChunk(c.minTime, c.maxTime, ref, chk.chunk)
addChunk(chk.minTime, chk.maxTime, ref, chk.chunk)
}
} else {
var emptyChunk chunkenc.Chunk

View file

@ -389,6 +389,7 @@ func TestOOOHeadChunkReader_LabelValues(t *testing.T) {
func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenario) {
chunkRange := int64(2000)
head, _ := newTestHead(t, chunkRange, wlog.CompressionNone, true)
head.opts.EnableOOONativeHistograms.Store(true)
t.Cleanup(func() { require.NoError(t, head.Close()) })
ctx := context.Background()
@ -493,6 +494,8 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
opts := DefaultOptions()
opts.OutOfOrderCapMax = 5
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true
opts.EnableOOONativeHistograms = true
s1 := labels.FromStrings("l", "v1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }
@ -902,6 +905,8 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
opts := DefaultOptions()
opts.OutOfOrderCapMax = 5
opts.OutOfOrderTimeWindow = 120 * time.Minute.Milliseconds()
opts.EnableNativeHistograms = true
opts.EnableOOONativeHistograms = true
s1 := labels.FromStrings("l", "v1")
minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() }

View file

@ -28,15 +28,14 @@ import (
const testMaxSize int = 32
// Formulas chosen to make testing easy.
func valEven(pos int) int { return pos*2 + 2 } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values
func valOdd(pos int) int { return pos*2 + 1 } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals.
// Formulas chosen to make testing easy.
func valEven(pos int) int64 { return int64(pos*2 + 2) } // s[0]=2, s[1]=4, s[2]=6, ..., s[31]=64 - Predictable pre-existing values
func valOdd(pos int) int64 { return int64(pos*2 + 1) } // s[0]=1, s[1]=3, s[2]=5, ..., s[31]=63 - New values will interject at chosen position because they sort before the pre-existing vals.
func samplify(v int) sample { return sample{int64(v), float64(v), nil, nil} }
func makeEvenSampleSlice(n int) []sample {
func makeEvenSampleSlice(n int, sampleFunc func(ts int64) sample) []sample {
s := make([]sample, n)
for i := 0; i < n; i++ {
s[i] = samplify(valEven(i))
s[i] = sampleFunc(valEven(i))
}
return s
}
@ -45,8 +44,36 @@ func makeEvenSampleSlice(n int) []sample {
// - Number of pre-existing samples anywhere from 0 to testMaxSize-1.
// - Insert new sample before first pre-existing samples, after the last, and anywhere in between.
// - With a chunk initial capacity of testMaxSize/8 and testMaxSize, which lets us test non-full and full chunks, and chunks that need to expand themselves.
// Note: in all samples used, t always equals v in numeric value. When we talk about 'value', we just refer to a value that will be used for both sample.t and sample.v.
func TestOOOInsert(t *testing.T) {
scenarios := map[string]struct {
sampleFunc func(ts int64) sample
}{
"float": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, f: float64(ts)}
},
},
"integer histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
},
},
"float histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))}
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
testOOOInsert(t, scenario.sampleFunc)
})
}
}
func testOOOInsert(t *testing.T,
sampleFunc func(ts int64) sample,
) {
for numPreExisting := 0; numPreExisting <= testMaxSize; numPreExisting++ {
// For example, if we have numPreExisting 2, then:
// chunk.samples indexes filled 0 1
@ -56,20 +83,21 @@ func TestOOOInsert(t *testing.T) {
for insertPos := 0; insertPos <= numPreExisting; insertPos++ {
chunk := NewOOOChunk()
chunk.samples = makeEvenSampleSlice(numPreExisting)
newSample := samplify(valOdd(insertPos))
chunk.Insert(newSample.t, newSample.f, nil, nil)
chunk.samples = make([]sample, numPreExisting)
chunk.samples = makeEvenSampleSlice(numPreExisting, sampleFunc)
newSample := sampleFunc(valOdd(insertPos))
chunk.Insert(newSample.t, newSample.f, newSample.h, newSample.fh)
var expSamples []sample
// Our expected new samples slice, will be first the original samples.
for i := 0; i < insertPos; i++ {
expSamples = append(expSamples, samplify(valEven(i)))
expSamples = append(expSamples, sampleFunc(valEven(i)))
}
// Then the new sample.
expSamples = append(expSamples, newSample)
// Followed by any original samples that were pushed back by the new one.
for i := insertPos; i < numPreExisting; i++ {
expSamples = append(expSamples, samplify(valEven(i)))
expSamples = append(expSamples, sampleFunc(valEven(i)))
}
require.Equal(t, expSamples, chunk.samples, "numPreExisting %d, insertPos %d", numPreExisting, insertPos)
@ -81,17 +109,46 @@ func TestOOOInsert(t *testing.T) {
// pre-existing samples, with between 1 and testMaxSize pre-existing samples and
// with a chunk initial capacity of testMaxSize/8 and testMaxSize, which lets us test non-full and full chunks, and chunks that need to expand themselves.
func TestOOOInsertDuplicate(t *testing.T) {
scenarios := map[string]struct {
sampleFunc func(ts int64) sample
}{
"float": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, f: float64(ts)}
},
},
"integer histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
},
},
"float histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))}
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
testOOOInsertDuplicate(t, scenario.sampleFunc)
})
}
}
func testOOOInsertDuplicate(t *testing.T,
sampleFunc func(ts int64) sample,
) {
for num := 1; num <= testMaxSize; num++ {
for dupPos := 0; dupPos < num; dupPos++ {
chunk := NewOOOChunk()
chunk.samples = makeEvenSampleSlice(num)
chunk.samples = makeEvenSampleSlice(num, sampleFunc)
dupSample := chunk.samples[dupPos]
dupSample.f = 0.123
ok := chunk.Insert(dupSample.t, dupSample.f, nil, nil)
ok := chunk.Insert(dupSample.t, dupSample.f, dupSample.h, dupSample.fh)
expSamples := makeEvenSampleSlice(num) // We expect no change.
expSamples := makeEvenSampleSlice(num, sampleFunc) // We expect no change.
require.False(t, ok)
require.Equal(t, expSamples, chunk.samples, "num %d, dupPos %d", num, dupPos)
}

View file

@ -254,6 +254,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc
return nil, err
}
its = append(its, allPostings)
case m.Type == labels.MatchRegexp && m.Value == ".*":
// .* regexp matches any string: do nothing.
case m.Type == labels.MatchNotRegexp && m.Value == ".*":
return index.EmptyPostings(), nil
case labelMustBeSet[m.Name]:
// If this matcher must be non-empty, we can be smarter.
matchesEmpty := m.Matches("")

View file

@ -105,17 +105,17 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
jFoo := labels.MustNewMatcher(labels.MatchEqual, "j", "foo")
jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")
iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")
i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.*$")
iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1$")
iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*1.*$")
iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.+$")
i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "^1.+$")
iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "^$")
iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")
i1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.*")
iStar1 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*1")
iStar1Star := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*1.*")
iPlus := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+")
i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.+")
iEmptyRe := labels.MustNewMatcher(labels.MatchRegexp, "i", "")
iNotEmpty := labels.MustNewMatcher(labels.MatchNotEqual, "i", "")
iNot2 := labels.MustNewMatcher(labels.MatchNotEqual, "i", "2"+postingsBenchSuffix)
iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^2.*$")
iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*2.*$")
iNot2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", "2.*")
iNotStar2Star := labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*2.*")
jFooBar := labels.MustNewMatcher(labels.MatchRegexp, "j", "foo|bar")
jXXXYYY := labels.MustNewMatcher(labels.MatchRegexp, "j", "XXX|YYY")
jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+")
@ -186,13 +186,13 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
i1Plus := labels.MustNewMatcher(labels.MatchRegexp, "i", "1.+")
i1PostingsBenchSuffix := labels.MustNewMatcher(labels.MatchEqual, "i", "1"+postingsBenchSuffix)
iSuffix := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+ddd")
iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")
iStar := labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")
jNotFoo := labels.MustNewMatcher(labels.MatchNotEqual, "j", "foo")
jXXXYYY := labels.MustNewMatcher(labels.MatchRegexp, "j", "XXX|YYY")
jXplus := labels.MustNewMatcher(labels.MatchRegexp, "j", "X.+")
n1 := labels.MustNewMatcher(labels.MatchEqual, "n", "1"+postingsBenchSuffix)
nX := labels.MustNewMatcher(labels.MatchNotEqual, "n", "X"+postingsBenchSuffix)
nPlus := labels.MustNewMatcher(labels.MatchRegexp, "n", "^.+$")
nPlus := labels.MustNewMatcher(labels.MatchRegexp, "n", ".+")
ctx := context.Background()
@ -205,12 +205,12 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
{`i with i="1"`, "i", []*labels.Matcher{i1}},
// i has 100k values.
{`i with n="1"`, "i", []*labels.Matcher{n1}},
{`i with n="^.+$"`, "i", []*labels.Matcher{nPlus}},
{`i with n=".+"`, "i", []*labels.Matcher{nPlus}},
{`i with n="1",j!="foo"`, "i", []*labels.Matcher{n1, jNotFoo}},
{`i with n="1",j=~"X.+"`, "i", []*labels.Matcher{n1, jXplus}},
{`i with n="1",j=~"XXX|YYY"`, "i", []*labels.Matcher{n1, jXXXYYY}},
{`i with n="X",j!="foo"`, "i", []*labels.Matcher{nX, jNotFoo}},
{`i with n="1",i=~"^.*$",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}},
{`i with n="1",i=~".*",j!="foo"`, "i", []*labels.Matcher{n1, iStar, jNotFoo}},
// matchers on i itself
{`i with i="1aaa...ddd"`, "i", []*labels.Matcher{i1PostingsBenchSuffix}},
{`i with i=~"1.+"`, "i", []*labels.Matcher{i1Plus}},

View file

@ -2689,6 +2689,7 @@ func TestPostingsForMatchers(t *testing.T) {
app.Append(0, labels.FromStrings("n", "1"), 0, 0)
app.Append(0, labels.FromStrings("n", "1", "i", "a"), 0, 0)
app.Append(0, labels.FromStrings("n", "1", "i", "b"), 0, 0)
app.Append(0, labels.FromStrings("n", "1", "i", "\n"), 0, 0)
app.Append(0, labels.FromStrings("n", "2"), 0, 0)
app.Append(0, labels.FromStrings("n", "2.5"), 0, 0)
require.NoError(t, app.Commit())
@ -2704,6 +2705,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2722,6 +2724,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
@ -2739,6 +2742,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2750,6 +2754,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2757,6 +2762,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
// Regex.
@ -2766,6 +2772,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2801,6 +2808,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2808,6 +2816,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
// Not regex.
@ -2816,6 +2825,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2849,12 +2859,14 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^a?$")},
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2862,6 +2874,7 @@ func TestPostingsForMatchers(t *testing.T) {
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
},
},
{
@ -2895,6 +2908,7 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
},
},
@ -2942,6 +2956,57 @@ func TestPostingsForMatchers(t *testing.T) {
labels.FromStrings("n", "2.5"),
},
},
// Test shortcut for i=~".*"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "i", ".*")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
// Test shortcut for n=~".*" and i=~"^.*$"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchRegexp, "i", "^.*$")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "1", "i", "a"),
labels.FromStrings("n", "1", "i", "b"),
labels.FromStrings("n", "1", "i", "\n"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
// Test shortcut for n=~"^.*$"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchEqual, "i", "a")},
exp: []labels.Labels{
labels.FromStrings("n", "1", "i", "a"),
},
},
// Test shortcut for i!~".*"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")},
exp: []labels.Labels{},
},
// Test shortcut for n!~"^.*$", i!~".*". First one triggers empty result.
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchNotRegexp, "n", "^.*$"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")},
exp: []labels.Labels{},
},
// Test shortcut for i!~".*"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".*")},
exp: []labels.Labels{},
},
// Test shortcut for i!~"^.*$"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")},
exp: []labels.Labels{},
},
}
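
The i="\n" rows above exist because the shortcut is only sound if ".*" truly matches every label value, newlines included. A standalone check of that property, assuming Prometheus's anchored (?s:...) compilation of matcher regexps:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	plain := regexp.MustCompile(`^(?:.*)$`)    // "." stops at newlines
	promLike := regexp.MustCompile(`^(?s:.*)$`) // s flag: "." matches "\n" too
	fmt.Println(plain.MatchString("\n"))    // false
	fmt.Println(promLike.MatchString("\n")) // true: ".*" matches any label value
}
```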
ir, err := h.Index()

View file

@ -16,6 +16,8 @@ package tsdb
import (
"testing"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
@ -27,7 +29,11 @@ import (
)
const (
float = "float"
float = "float"
intHistogram = "integer histogram"
floatHistogram = "float histogram"
gaugeIntHistogram = "gauge int histogram"
gaugeFloatHistogram = "gauge float histogram"
)
type testValue struct {
@ -42,7 +48,6 @@ type sampleTypeScenario struct {
sampleFunc func(ts, value int64) sample
}
// TODO: native histogram sample types will be added as part of out-of-order native histogram support; see #11220.
var sampleTypeScenarios = map[string]sampleTypeScenario{
float: {
sampleType: sampleMetricTypeFloat,
@ -55,50 +60,50 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
return sample{t: ts, f: float64(value)}
},
},
// intHistogram: {
// sampleType: sampleMetricTypeHistogram,
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
// s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
// ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
// return ref, s, err
// },
// sampleFunc: func(ts, value int64) sample {
// return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
// },
// },
// floatHistogram: {
// sampleType: sampleMetricTypeHistogram,
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
// s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
// ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
// return ref, s, err
// },
// sampleFunc: func(ts, value int64) sample {
// return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
// },
// },
// gaugeIntHistogram: {
// sampleType: sampleMetricTypeHistogram,
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
// s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
// ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
// return ref, s, err
// },
// sampleFunc: func(ts, value int64) sample {
// return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
// },
// },
// gaugeFloatHistogram: {
// sampleType: sampleMetricTypeHistogram,
// appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
// s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
// ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
// return ref, s, err
// },
// sampleFunc: func(ts, value int64) sample {
// return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
// },
// },
intHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
},
},
floatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
},
},
gaugeIntHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
},
},
gaugeFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
},
},
}
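
The histogram scenarios above are built from the tsdbutil generators; a small runnable look at what one generated test histogram carries (the printed counts are whatever the generators produce for value 42, nothing is asserted here). In a test, scenario.appendFunc writes the sample and scenario.sampleFunc reproduces it for assertions, as testOutOfOrderSamplesMetric does:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	h := tsdbutil.GenerateTestHistogram(42)       // integer histogram sample payload
	fh := tsdbutil.GenerateTestFloatHistogram(42) // float histogram sample payload
	fmt.Println(h.Count, fh.Count)
}
```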
// requireEqualSeries checks that the actual series are equal to the expected ones. It ignores the counter reset hints for histograms.

View file

@ -25,6 +25,7 @@ import (
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
@ -52,6 +53,13 @@ func retry(t *testing.T, interval time.Duration, n int, f func() bool) {
t.Logf("function returned false")
}
// Overwrite readTimeout defined in watcher.go.
func overwriteReadTimeout(t *testing.T, val time.Duration) {
initialVal := readTimeout
readTimeout = val
t.Cleanup(func() { readTimeout = initialVal })
}
type writeToMock struct {
samplesAppended int
exemplarsAppended int
@ -302,7 +310,7 @@ func TestReadToEndNoCheckpoint(t *testing.T) {
}
}
require.NoError(t, w.Log(recs...))
readTimeout = time.Second
overwriteReadTimeout(t, time.Second)
_, _, err = Segments(w.Dir())
require.NoError(t, err)
@ -394,7 +402,7 @@ func TestReadToEndWithCheckpoint(t *testing.T) {
_, _, err = Segments(w.Dir())
require.NoError(t, err)
readTimeout = time.Second
overwriteReadTimeout(t, time.Second)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
go watcher.Start()
@ -607,7 +615,7 @@ func TestCheckpointSeriesReset(t *testing.T) {
_, _, err = Segments(w.Dir())
require.NoError(t, err)
readTimeout = time.Second
overwriteReadTimeout(t, time.Second)
wt := newWriteToMock(0)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.MaxSegment = -1
@ -742,9 +750,6 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
const seriesCount = 10
const samplesCount = 50
// This test can take longer than intended to finish in cloud CI.
readTimeout := 10 * time.Second
for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} {
t.Run(string(compress), func(t *testing.T) {
dir := t.TempDir()
@ -755,36 +760,50 @@ func TestRun_AvoidNotifyWhenBehind(t *testing.T) {
w, err := NewSize(nil, nil, wdir, segmentSize, compress)
require.NoError(t, err)
var wg sync.WaitGroup
// Generate one segment initially to ensure that watcher.Run() finds at least one segment on disk.
// Write to 00000000; the watcher will read series from it.
require.NoError(t, generateWALRecords(w, 0, seriesCount, samplesCount))
w.NextSegment() // Force creation of the next segment
wg.Add(1)
go func() {
defer wg.Done()
for i := 1; i < segmentsToWrite; i++ {
require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
w.NextSegment()
}
}()
// Create 00000001; the watcher will tail it once started.
w.NextSegment()
// Set up the watcher and run it in the background.
wt := newWriteToMock(time.Millisecond)
watcher := NewWatcher(wMetrics, nil, nil, "", wt, dir, false, false, false)
watcher.setMetrics()
watcher.MaxSegment = segmentsToRead
startTime := time.Now()
err = watcher.Run()
wg.Wait()
require.Less(t, time.Since(startTime), readTimeout)
// But samples records shouldn't get dropped
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() > 0
var g errgroup.Group
g.Go(func() error {
startTime := time.Now()
err = watcher.Run()
if err != nil {
return err
}
// If the watcher were to wait for readTicker to read every new segment, it would need readTimeout * segmentsToRead.
d := time.Since(startTime)
if d > readTimeout {
return fmt.Errorf("watcher ran for %s, it shouldn't rely on readTicker=%s to read the new segments", d, readTimeout)
}
return nil
})
require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
require.NoError(t, err)
// The watcher went through 00000000 and is tailing the next one.
retry(t, defaultRetryInterval, defaultRetries, func() bool {
return wt.checkNumSeries() == seriesCount
})
// In the meantime, add some new segments in bulk.
// We should end up with segmentsToWrite + 1 segments now.
for i := 1; i < segmentsToWrite; i++ {
require.NoError(t, generateWALRecords(w, i, seriesCount, samplesCount))
w.NextSegment()
}
// Wait for the watcher.
require.NoError(t, g.Wait())
// All series and samples were read.
require.Equal(t, (segmentsToRead+1)*seriesCount, wt.checkNumSeries()) // Series from 00000000 are also read.
require.Equal(t, segmentsToRead*seriesCount*samplesCount, wt.samplesAppended)
require.NoError(t, w.Close())
})
}

View file

@ -1,30 +0,0 @@
# React + TypeScript + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
- Configure the top-level `parserOptions` property like this:
```js
export default {
// other rules...
parserOptions: {
ecmaVersion: 'latest',
sourceType: 'module',
project: ['./tsconfig.json', './tsconfig.node.json'],
tsconfigRootDir: __dirname,
},
}
```
- Replace `plugin:@typescript-eslint/recommended` with `plugin:@typescript-eslint/recommended-type-checked` or `plugin:@typescript-eslint/strict-type-checked`
- Optionally add `plugin:@typescript-eslint/stylistic-type-checked`
- Install [eslint-plugin-react](https://github.com/jsx-eslint/eslint-plugin-react) and add `plugin:react/recommended` & `plugin:react/jsx-runtime` to the `extends` list

View file

@ -2,6 +2,7 @@ import "@mantine/core/styles.css";
import "@mantine/code-highlight/styles.css";
import "@mantine/notifications/styles.css";
import "@mantine/dates/styles.css";
import "./mantine-overrides.css";
import classes from "./App.module.css";
import PrometheusLogo from "./images/prometheus-logo.svg";
@ -67,11 +68,10 @@ import { QueryParamProvider } from "use-query-params";
import { ReactRouter6Adapter } from "use-query-params/adapters/react-router-6";
import ServiceDiscoveryPage from "./pages/service-discovery/ServiceDiscoveryPage";
import AlertmanagerDiscoveryPage from "./pages/AlertmanagerDiscoveryPage";
import { actionIconStyle, navIconStyle } from "./styles";
const queryClient = new QueryClient();
const navIconStyle = { width: rem(16), height: rem(16) };
const mainNavPages = [
{
title: "Query",
@ -322,9 +322,9 @@ function App() {
color="gray"
title="Documentation"
aria-label="Documentation"
size={32}
size={rem(32)}
>
<IconBook size={20} />
<IconBook style={actionIconStyle} />
</ActionIcon>
</>
);
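Many of the UI diffs below replace ad-hoc icon sizing (`size={20}`, `style={{ width: rem(14) }}`, and so on) with shared constants imported from a new `styles` module. That module itself is not included in this diff; the following is a plausible sketch reconstructed from the import names and the inline sizes they replace — the exact `rem` values are assumptions, not confirmed contents of the file:

```ts
// styles.ts — hypothetical reconstruction. The export names come from the
// imports in the diffs below; the dimensions are inferred from the size
// props they replace (e.g. navIconStyle from the old App.tsx constant).
import { rem } from "@mantine/core";

export const navIconStyle = { width: rem(16), height: rem(16) };
export const actionIconStyle = { width: rem(20), height: rem(20) };
export const menuIconStyle = { width: rem(14), height: rem(14) };
export const inputIconStyle = { width: rem(14), height: rem(14) };
export const badgeIconStyle = { width: rem(12), height: rem(12) };
export const buttonIconStyle = { width: rem(18), height: rem(18) };
export const expandIconStyle = { width: rem(16), height: rem(16) };
export const infoPageCardTitleIconStyle = { width: rem(22), height: rem(22) };
```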

View file

@ -32,7 +32,7 @@ class ErrorBoundary extends Component<Props, State> {
<Alert
color="red"
title={this.props.title || "Error querying page data"}
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
maw={500}
mx="auto"
mt="lg"

View file

@ -0,0 +1,32 @@
import { Card, Group } from "@mantine/core";
import { TablerIconsProps } from "@tabler/icons-react";
import { FC, ReactNode } from "react";
import { infoPageCardTitleIconStyle } from "../styles";
const InfoPageCard: FC<{
children: ReactNode;
title?: string;
icon?: React.ComponentType<TablerIconsProps>;
}> = ({ children, title, icon: Icon }) => {
return (
<Card shadow="xs" withBorder p="md">
{title && (
<Group
wrap="nowrap"
align="center"
ml="xs"
mb="sm"
gap="xs"
fz="xl"
fw={600}
>
{Icon && <Icon style={infoPageCardTitleIconStyle} />}
{title}
</Group>
)}
{children}
</Card>
);
};
export default InfoPageCard;

View file

@ -0,0 +1,12 @@
import { Stack } from "@mantine/core";
import { FC, ReactNode } from "react";
const InfoPageStack: FC<{ children: ReactNode }> = ({ children }) => {
return (
<Stack gap="lg" maw={1000} mx="auto" mt="xs">
{children}
</Stack>
);
};
export default InfoPageStack;
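Together, the two new components give the info pages a consistent shell. A minimal usage sketch, mirroring the AgentPage and StatusPage refactors further down (the page itself is illustrative, not part of this commit):

```tsx
// Illustrative only — real usages appear in the page diffs below.
import { Text } from "@mantine/core";
import { IconSpy } from "@tabler/icons-react";
import InfoPageStack from "../components/InfoPageStack";
import InfoPageCard from "../components/InfoPageCard";

const ExamplePage = () => (
  <InfoPageStack>
    <InfoPageCard title="Example card" icon={IconSpy}>
      <Text p="md">Card body content.</Text>
    </InfoPageCard>
  </InfoPageStack>
);

export default ExamplePage;
```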

View file

@ -1,17 +1,23 @@
import { FC, PropsWithChildren, useEffect, useState } from "react";
import { IconAlertTriangle } from "@tabler/icons-react";
import { useAppDispatch } from "../state/hooks";
import { updateSettings, useSettings } from "../state/settingsSlice";
import { useSuspenseAPIQuery } from "../api/api";
import { WALReplayStatus } from "../api/responseTypes/walreplay";
import { Progress, Stack, Title } from "@mantine/core";
import { Progress, Alert } from "@mantine/core";
import { useSuspenseQuery } from "@tanstack/react-query";
const STATUS_STARTING = "is starting up...";
const STATUS_STOPPING = "is shutting down...";
const STATUS_LOADING = "is not ready...";
const ReadinessLoader: FC = () => {
const { pathPrefix } = useSettings();
const { pathPrefix, agentMode } = useSettings();
const dispatch = useAppDispatch();
// Query key is incremented every second to retrigger the status fetching.
const [queryKey, setQueryKey] = useState(0);
const [statusMessage, setStatusMessage] = useState("");
// Query readiness status.
const { data: ready } = useSuspenseQuery<boolean>({
@ -28,8 +34,16 @@ const ReadinessLoader: FC = () => {
});
switch (res.status) {
case 200:
setStatusMessage(""); // Clear any status message when ready.
return true;
case 503:
// Check the custom header `X-Prometheus-Stopping` for stopping information.
if (res.headers.get("X-Prometheus-Stopping") === "true") {
setStatusMessage(STATUS_STOPPING);
} else {
setStatusMessage(STATUS_STARTING);
}
return false;
default:
throw new Error(res.statusText);
@ -40,14 +54,16 @@ const ReadinessLoader: FC = () => {
},
});
// Query WAL replay status.
// Only call the WAL replay status API if the service is starting up.
const shouldQueryWALReplay = statusMessage === STATUS_STARTING;
const {
data: {
data: { min, max, current },
},
data: walData,
isSuccess: walSuccess,
} = useSuspenseAPIQuery<WALReplayStatus>({
path: "/status/walreplay",
key: ["walreplay", queryKey],
enabled: shouldQueryWALReplay, // Only enabled when service is starting up.
});
useEffect(() => {
@ -62,21 +78,28 @@ const ReadinessLoader: FC = () => {
}, []);
return (
<Stack gap="lg" maw={1000} mx="auto" mt="xs">
<Title order={2}>Starting up...</Title>
{max > 0 && (
<Alert
color="yellow"
title={"Prometheus " + (agentMode && "Agent "||"") + (statusMessage || STATUS_LOADING)}
icon={<IconAlertTriangle/>}
maw={500}
mx="auto"
mt="lg"
>
{shouldQueryWALReplay && walSuccess && walData && (
<>
<p>
Replaying WAL ({current}/{max})
</p>
<strong>
Replaying WAL ({walData.data.current}/{walData.data.max})
</strong>
<Progress
size="xl"
animated
value={((current - min + 1) / (max - min + 1)) * 100}
color="yellow"
value={((walData.data.current - walData.data.min + 1) / (walData.data.max - walData.data.min + 1)) * 100}
/>
</>
)}
</Stack>
</Alert>
);
};
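The retriggering trick above — bumping a numeric query key once per second so the readiness endpoint is refetched — can be isolated as a small hook. A minimal sketch assuming `@tanstack/react-query`; the hook name and fetch details are illustrative, not the app's exact wiring:

```tsx
// Sketch of the polling pattern used by ReadinessLoader: changing the query
// key every second makes useSuspenseQuery refetch the readiness endpoint.
import { useEffect, useState } from "react";
import { useSuspenseQuery } from "@tanstack/react-query";

export const useReadinessPoll = (pathPrefix: string) => {
  const [queryKey, setQueryKey] = useState(0);

  // Retrigger the fetch once per second.
  useEffect(() => {
    const interval = setInterval(() => setQueryKey((k) => k + 1), 1000);
    return () => clearInterval(interval);
  }, []);

  return useSuspenseQuery<boolean>({
    queryKey: ["ready", queryKey],
    retry: false,
    queryFn: async () => {
      const res = await fetch(`${pathPrefix}/-/ready`, {
        cache: "no-store",
        credentials: "same-origin",
      });
      // On 503, the X-Prometheus-Stopping header distinguishes shutdown
      // from startup, as in the component above.
      return res.status === 200;
    },
  });
};
```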

View file

@ -4,7 +4,6 @@ import {
Box,
Card,
Group,
rem,
Table,
Tooltip,
useComputedColorScheme,
@ -25,6 +24,7 @@ import {
import { PromQLExtension } from "@prometheus-io/codemirror-promql";
import { LabelBadges } from "./LabelBadges";
import { useSettings } from "../state/settingsSlice";
import { actionIconStyle, badgeIconStyle } from "../styles";
const promqlExtension = new PromQLExtension();
@ -64,7 +64,7 @@ const RuleDefinition: FC<{ rule: Rule }> = ({ rule }) => {
}}
className={codeboxClasses.queryButton}
>
<IconSearch style={{ width: rem(14) }} />
<IconSearch style={actionIconStyle} />
</ActionIcon>
</Tooltip>
</Card>
@ -74,7 +74,7 @@ const RuleDefinition: FC<{ rule: Rule }> = ({ rule }) => {
<Badge
variant="light"
styles={{ label: { textTransform: "none" } }}
leftSection={<IconClockPause size={12} />}
leftSection={<IconClockPause style={badgeIconStyle} />}
>
for: {formatPrometheusDuration(rule.duration * 1000)}
</Badge>
@ -83,7 +83,7 @@ const RuleDefinition: FC<{ rule: Rule }> = ({ rule }) => {
<Badge
variant="light"
styles={{ label: { textTransform: "none" } }}
leftSection={<IconClockPlay size={12} />}
leftSection={<IconClockPlay style={badgeIconStyle} />}
>
keep_firing_for: {formatPrometheusDuration(rule.duration * 1000)}
</Badge>

View file

@ -3,6 +3,7 @@ import { IconSettings } from "@tabler/icons-react";
import { FC } from "react";
import { useAppDispatch } from "../state/hooks";
import { updateSettings, useSettings } from "../state/settingsSlice";
import { actionIconStyle } from "../styles";
const SettingsMenu: FC = () => {
const {
@ -24,7 +25,7 @@ const SettingsMenu: FC = () => {
aria-label="Settings"
size={32}
>
<IconSettings size={20} />
<IconSettings style={actionIconStyle} />
</ActionIcon>
</Popover.Target>
<Popover.Dropdown>

View file

@ -10,6 +10,7 @@ import {
useCombobox,
} from "@mantine/core";
import { IconHeartRateMonitor } from "@tabler/icons-react";
import { inputIconStyle } from "../styles";
interface StatePillProps extends React.ComponentPropsWithoutRef<"div"> {
value: string;
@ -80,7 +81,7 @@ export const StateMultiSelect: FC<StateMultiSelectProps> = ({
pointer
onClick={() => combobox.toggleDropdown()}
miw={200}
leftSection={<IconHeartRateMonitor size={14} />}
leftSection={<IconHeartRateMonitor style={inputIconStyle} />}
rightSection={
values.length > 0 ? (
<ComboboxClearButton onClear={() => onChange([])} />

View file

@ -0,0 +1,104 @@
import { humanizeDuration, formatPrometheusDuration } from "./formatTime";
describe("formatPrometheusDuration", () => {
test('returns "0s" for 0 milliseconds', () => {
expect(formatPrometheusDuration(0)).toBe("0s");
});
test("formats milliseconds correctly", () => {
expect(formatPrometheusDuration(1)).toBe("1ms");
expect(formatPrometheusDuration(999)).toBe("999ms");
});
test("formats seconds correctly", () => {
expect(formatPrometheusDuration(1000)).toBe("1s");
expect(formatPrometheusDuration(1500)).toBe("1s500ms");
expect(formatPrometheusDuration(59999)).toBe("59s999ms");
});
test("formats minutes correctly", () => {
expect(formatPrometheusDuration(60000)).toBe("1m");
expect(formatPrometheusDuration(120000)).toBe("2m");
expect(formatPrometheusDuration(3599999)).toBe("59m59s999ms");
});
test("formats hours correctly", () => {
expect(formatPrometheusDuration(3600000)).toBe("1h");
expect(formatPrometheusDuration(7200000)).toBe("2h");
expect(formatPrometheusDuration(86399999)).toBe("23h59m59s999ms");
});
test("formats days correctly", () => {
expect(formatPrometheusDuration(86400000)).toBe("1d");
expect(formatPrometheusDuration(172800000)).toBe("2d");
expect(formatPrometheusDuration(86400000 * 365 - 1)).toBe(
"364d23h59m59s999ms"
);
});
test("handles negative durations", () => {
expect(formatPrometheusDuration(-1000)).toBe("-1s");
expect(formatPrometheusDuration(-86400000)).toBe("-1d");
});
test("combines multiple units correctly", () => {
expect(
formatPrometheusDuration(86400000 + 3600000 + 60000 + 1000 + 1)
).toBe("1d1h1m1s1ms");
});
test("omits zero values", () => {
expect(formatPrometheusDuration(86400000 + 1000)).toBe("1d1s");
});
});
describe("humanizeDuration", () => {
test('returns "0s" for 0 milliseconds', () => {
expect(humanizeDuration(0)).toBe("0s");
});
test("formats milliseconds correctly", () => {
expect(humanizeDuration(1)).toBe("1ms");
expect(humanizeDuration(999)).toBe("999ms");
});
test("formats seconds correctly", () => {
expect(humanizeDuration(1000)).toBe("1s");
expect(humanizeDuration(1500)).toBe("1.5s");
expect(humanizeDuration(59999)).toBe("59.999s");
});
test("formats minutes correctly", () => {
expect(humanizeDuration(60000)).toBe("1m");
expect(humanizeDuration(120000)).toBe("2m");
expect(humanizeDuration(3599999)).toBe("59m 59.999s");
});
test("formats hours correctly", () => {
expect(humanizeDuration(3600000)).toBe("1h");
expect(humanizeDuration(7200000)).toBe("2h");
expect(humanizeDuration(86399999)).toBe("23h 59m 59.999s");
});
test("formats days correctly", () => {
expect(humanizeDuration(86400000)).toBe("1d");
expect(humanizeDuration(172800000)).toBe("2d");
expect(humanizeDuration(86400000 * 365 - 1)).toBe("364d 23h 59m 59.999s");
});
test("handles negative durations", () => {
expect(humanizeDuration(-1000)).toBe("-1s");
expect(humanizeDuration(-86400000)).toBe("-1d");
});
test("combines multiple units correctly", () => {
expect(humanizeDuration(86400000 + 3600000 + 60000 + 1000 + 1)).toBe(
"1d 1h 1m 1.001s"
);
});
test("omits zero values", () => {
expect(humanizeDuration(86400000 + 1000)).toBe("1d 1s");
});
});

View file

@ -45,37 +45,55 @@ export const parsePrometheusDuration = (durationStr: string): number | null => {
return dur;
};
// Format a duration in milliseconds into a Prometheus duration string like "1d2h3m4s".
export const formatPrometheusDuration = (d: number): string => {
let ms = d;
let r = "";
if (ms === 0) {
// Used by:
// - formatPrometheusDuration() => "5d5m2s123ms"
// - humanizeDuration() => "5d 5m 2.123s"
const formatDuration = (
d: number,
componentSeparator?: string,
showFractionalSeconds?: boolean
): string => {
if (d === 0) {
return "0s";
}
const f = (unit: string, mult: number, exact: boolean) => {
const sign = d < 0 ? "-" : "";
let ms = Math.abs(d);
const r: string[] = [];
for (const { unit, mult, exact } of [
// Only format years and weeks if the remainder is zero, as it is often
// easier to read 90d than 12w6d.
{ unit: "y", mult: 1000 * 60 * 60 * 24 * 365, exact: true },
{ unit: "w", mult: 1000 * 60 * 60 * 24 * 7, exact: true },
{ unit: "d", mult: 1000 * 60 * 60 * 24, exact: false },
{ unit: "h", mult: 1000 * 60 * 60, exact: false },
{ unit: "m", mult: 1000 * 60, exact: false },
{ unit: "s", mult: 1000, exact: false },
{ unit: "ms", mult: 1, exact: false },
]) {
if (exact && ms % mult !== 0) {
return;
continue;
}
const v = Math.floor(ms / mult);
if (v > 0) {
r += `${v}${unit}`;
ms -= v * mult;
if (showFractionalSeconds && unit === "s" && ms > 0) {
// Show "2.34s" instead of "2s 340ms".
r.push(`${parseFloat((v + ms / 1000).toFixed(3))}s`);
break;
} else {
r.push(`${v}${unit}`);
}
}
};
}
// Only format years and weeks if the remainder is zero, as it is often
// easier to read 90d than 12w6d.
f("y", 1000 * 60 * 60 * 24 * 365, true);
f("w", 1000 * 60 * 60 * 24 * 7, true);
return sign + r.join(componentSeparator || "");
};
f("d", 1000 * 60 * 60 * 24, false);
f("h", 1000 * 60 * 60, false);
f("m", 1000 * 60, false);
f("s", 1000, false);
f("ms", 1, false);
return r;
// Format a duration in milliseconds into a Prometheus duration string like "1d2h3m4s".
export const formatPrometheusDuration = (d: number): string => {
return formatDuration(d);
};
export function parseTime(timeText: string): number {
@ -85,37 +103,7 @@ export function parseTime(timeText: string): number {
export const now = (): number => dayjs().valueOf();
export const humanizeDuration = (milliseconds: number): string => {
if (milliseconds === 0) {
return "0s";
}
const sign = milliseconds < 0 ? "-" : "";
const duration = dayjs.duration(Math.abs(milliseconds), "ms");
const ms = Math.floor(duration.milliseconds());
const s = Math.floor(duration.seconds());
const m = Math.floor(duration.minutes());
const h = Math.floor(duration.hours());
const d = Math.floor(duration.asDays());
const parts: string[] = [];
if (d !== 0) {
parts.push(`${d}d`);
}
if (h !== 0) {
parts.push(`${h}h`);
}
if (m !== 0) {
parts.push(`${m}m`);
}
if (s !== 0) {
if (ms !== 0) {
parts.push(`${s}.${ms}s`);
} else {
parts.push(`${s}s`);
}
} else if (milliseconds !== 0) {
parts.push(`${milliseconds.toFixed(3)}ms`);
}
return sign + parts.join(" ");
return formatDuration(milliseconds, " ", true);
};
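With the refactor in place, the two exported wrappers differ only in component separator and fractional-second handling. The first two expectations below are taken directly from the test file above; the last two follow from the exact-multiple rule for years and weeks in `formatDuration`:

```ts
// Same input, two renderings (expectations from formatTime.test.ts):
formatPrometheusDuration(86400000 + 3600000 + 60000 + 1000 + 1); // "1d1h1m1s1ms"
humanizeDuration(86400000 + 3600000 + 60000 + 1000 + 1); // "1d 1h 1m 1.001s"

// Years and weeks are only emitted for exact multiples:
formatPrometheusDuration(1000 * 60 * 60 * 24 * 7); // "1w"
formatPrometheusDuration(1000 * 60 * 60 * 24 * 90); // "90d", not "12w6d"
```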
export const humanizeDurationRelative = (

View file

@ -0,0 +1,4 @@
.mantine-Badge-label {
overflow: unset;
text-overflow: unset;
}

View file

@ -1,16 +1,16 @@
import { Card, Group, Text } from "@mantine/core";
import { Text } from "@mantine/core";
import { IconSpy } from "@tabler/icons-react";
import { FC } from "react";
import InfoPageStack from "../components/InfoPageStack";
import InfoPageCard from "../components/InfoPageCard";
const AgentPage: FC = () => {
return (
<Card shadow="xs" withBorder p="md" mt="xs">
<Group wrap="nowrap" align="center" ml="xs" mb="sm" gap="xs">
<IconSpy size={22} />
<Text fz="xl" fw={600}>
Prometheus Agent
</Text>
</Group>
<InfoPageStack>
<InfoPageCard
title="Prometheus Agent"
icon={IconSpy}
>
<Text p="md">
This Prometheus instance is running in <strong>agent mode</strong>. In
this mode, Prometheus is only used to scrape discovered targets and
@ -18,9 +18,10 @@ const AgentPage: FC = () => {
</Text>
<Text p="md">
Some features are not available in this mode, such as querying and
alerting.
</Text>
</Card>
alerting.
</Text>
</InfoPageCard>
</InfoPageStack>
);
};

View file

@ -1,9 +1,11 @@
import { Alert, Card, Group, Stack, Table, Text } from "@mantine/core";
import { Alert, Table } from "@mantine/core";
import { IconBell, IconBellOff, IconInfoCircle } from "@tabler/icons-react";
import { useSuspenseAPIQuery } from "../api/api";
import { AlertmanagersResult } from "../api/responseTypes/alertmanagers";
import EndpointLink from "../components/EndpointLink";
import InfoPageCard from "../components/InfoPageCard";
import InfoPageStack from "../components/InfoPageStack";
export const targetPoolDisplayLimit = 20;
@ -18,14 +20,8 @@ export default function AlertmanagerDiscoveryPage() {
});
return (
<Stack gap="lg" maw={1000} mx="auto" mt="xs">
<Card shadow="xs" withBorder p="md">
<Group wrap="nowrap" align="center" ml="xs" mb="sm" gap="xs">
<IconBell size={22} />
<Text fz="xl" fw={600}>
Active Alertmanagers
</Text>
</Group>
<InfoPageStack>
<InfoPageCard title="Active Alertmanagers" icon={IconBell}>
{activeAlertmanagers.length === 0 ? (
<Alert title="No active alertmanagers" icon={<IconInfoCircle />}>
No active alertmanagers found.
@ -46,14 +42,8 @@ export default function AlertmanagerDiscoveryPage() {
</Table.Tbody>
</Table>
)}
</Card>
<Card shadow="xs" withBorder p="md">
<Group wrap="nowrap" align="center" ml="xs" mb="sm" gap="xs">
<IconBellOff size={22} />
<Text fz="xl" fw={600}>
Dropped Alertmanagers
</Text>
</Group>
</InfoPageCard>
<InfoPageCard title="Dropped Alertmanagers" icon={IconBellOff}>
{droppedAlertmanagers.length === 0 ? (
<Alert title="No dropped alertmanagers" icon={<IconInfoCircle />}>
No dropped alertmanagers found.
@ -74,7 +64,7 @@ export default function AlertmanagerDiscoveryPage() {
</Table.Tbody>
</Table>
)}
</Card>
</Stack>
</InfoPageCard>
</InfoPageStack>
);
}

View file

@ -32,6 +32,7 @@ import {
} from "use-query-params";
import { useDebouncedValue } from "@mantine/hooks";
import { KVSearch } from "@nexucis/kvsearch";
import { inputIconStyle } from "../styles";
type AlertsPageData = {
// How many rules are in each state across all groups.
@ -190,7 +191,7 @@ export default function AlertsPage() {
/>
<TextInput
flex={1}
leftSection={<IconSearch size={14} />}
leftSection={<IconSearch style={inputIconStyle} />}
placeholder="Filter by rule name or labels"
value={searchFilter || ""}
onChange={(event) =>
@ -199,7 +200,7 @@ export default function AlertsPage() {
></TextInput>
</Group>
{alertsPageData.groups.length === 0 ? (
<Alert title="No rules found" icon={<IconInfoCircle size={14} />}>
<Alert title="No rules found" icon={<IconInfoCircle />}>
No rules found.
</Alert>
) : (
@ -207,7 +208,7 @@ export default function AlertsPage() {
alertsPageData.groups.length !== shownGroups.length && (
<Alert
title="Hiding groups with no matching rules"
icon={<IconInfoCircle size={14} />}
icon={<IconInfoCircle />}
>
Hiding {alertsPageData.groups.length - shownGroups.length} empty
groups due to filters or no rules.
@ -326,7 +327,7 @@ export default function AlertsPage() {
{r.rule.alerts.length > 0 && (
<Table mt="lg">
<Table.Thead>
<Table.Tr>
<Table.Tr style={{ whiteSpace: "nowrap" }}>
<Table.Th>Alert labels</Table.Th>
<Table.Th>State</Table.Th>
<Table.Th>Active Since</Table.Th>

View file

@ -8,7 +8,6 @@ import {
TextInput,
rem,
keys,
Card,
} from "@mantine/core";
import {
IconSelector,
@ -18,6 +17,9 @@ import {
} from "@tabler/icons-react";
import classes from "./FlagsPage.module.css";
import { useSuspenseAPIQuery } from "../api/api";
import InfoPageStack from "../components/InfoPageStack";
import InfoPageCard from "../components/InfoPageCard";
import { inputIconStyle } from "../styles";
interface RowData {
flag: string;
@ -124,59 +126,56 @@ export default function FlagsPage() {
));
return (
<Card shadow="xs" maw={1000} mx="auto" mt="xs" withBorder>
<TextInput
placeholder="Filter by flag name or value"
mb="md"
autoFocus
leftSection={
<IconSearch
style={{ width: rem(16), height: rem(16) }}
stroke={1.5}
/>
}
value={search}
onChange={handleSearchChange}
/>
<Table
horizontalSpacing="md"
verticalSpacing="xs"
miw={700}
layout="fixed"
>
<Table.Tbody>
<Table.Tr>
<Th
sorted={sortBy === "flag"}
reversed={reverseSortDirection}
onSort={() => setSorting("flag")}
>
Flag
</Th>
<Th
sorted={sortBy === "value"}
reversed={reverseSortDirection}
onSort={() => setSorting("value")}
>
Value
</Th>
</Table.Tr>
</Table.Tbody>
<Table.Tbody>
{rows.length > 0 ? (
rows
) : (
<InfoPageStack>
<InfoPageCard>
<TextInput
placeholder="Filter by flag name or value"
mb="md"
autoFocus
leftSection={<IconSearch style={inputIconStyle} />}
value={search}
onChange={handleSearchChange}
/>
<Table
horizontalSpacing="md"
verticalSpacing="xs"
miw={700}
layout="fixed"
>
<Table.Tbody>
<Table.Tr>
<Table.Td colSpan={2}>
<Text fw={500} ta="center">
Nothing found
</Text>
</Table.Td>
<Th
sorted={sortBy === "flag"}
reversed={reverseSortDirection}
onSort={() => setSorting("flag")}
>
Flag
</Th>
<Th
sorted={sortBy === "value"}
reversed={reverseSortDirection}
onSort={() => setSorting("value")}
>
Value
</Th>
</Table.Tr>
)}
</Table.Tbody>
</Table>
</Card>
</Table.Tbody>
<Table.Tbody>
{rows.length > 0 ? (
rows
) : (
<Table.Tr>
<Table.Td colSpan={2}>
<Text fw={500} ta="center">
Nothing found
</Text>
</Table.Td>
</Table.Tr>
)}
</Table.Tbody>
</Table>
</InfoPageCard>
</InfoPageStack>
);
}

View file

@ -27,6 +27,7 @@ import { useSuspenseAPIQuery } from "../api/api";
import { RulesResult } from "../api/responseTypes/rules";
import badgeClasses from "../Badge.module.css";
import RuleDefinition from "../components/RuleDefinition";
import { badgeIconStyle } from "../styles";
const healthBadgeClass = (state: string) => {
switch (state) {
@ -47,7 +48,7 @@ export default function RulesPage() {
return (
<Stack mt="xs">
{data.data.groups.length === 0 && (
<Alert title="No rule groups" icon={<IconInfoCircle size={14} />}>
<Alert title="No rule groups" icon={<IconInfoCircle />}>
No rule groups configured.
</Alert>
)}
@ -74,7 +75,7 @@ export default function RulesPage() {
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh size={12} />}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
last run {humanizeDurationRelative(g.lastEvaluation, now())}
</Badge>
@ -84,7 +85,7 @@ export default function RulesPage() {
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconHourglass size={12} />}
leftSection={<IconHourglass style={badgeIconStyle} />}
>
took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)}
</Badge>
@ -94,7 +95,7 @@ export default function RulesPage() {
variant="transparent"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRepeat size={12} />}
leftSection={<IconRepeat style={badgeIconStyle} />}
>
every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "}
</Badge>
@ -102,7 +103,7 @@ export default function RulesPage() {
</Group>
</Group>
{g.rules.length === 0 && (
<Alert title="No rules" icon={<IconInfoCircle size={14} />}>
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in rule group.
</Alert>
)}
@ -150,7 +151,7 @@ export default function RulesPage() {
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh size={12} />}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
{humanizeDurationRelative(r.lastEvaluation, now())}
</Badge>
@ -164,7 +165,9 @@ export default function RulesPage() {
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconHourglass size={12} />}
leftSection={
<IconHourglass style={badgeIconStyle} />
}
>
{humanizeDuration(
parseFloat(r.evaluationTime) * 1000
@ -185,7 +188,7 @@ export default function RulesPage() {
color="red"
mt="sm"
title="Rule failed to evaluate"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
<strong>Error:</strong> {r.lastError}
</Alert>

View file

@ -1,8 +1,10 @@
import { Card, Group, Stack, Table, Text } from "@mantine/core";
import { Table } from "@mantine/core";
import { useSuspenseAPIQuery } from "../api/api";
import { IconRun, IconWall } from "@tabler/icons-react";
import { formatTimestamp } from "../lib/formatTime";
import { useSettings } from "../state/settingsSlice";
import InfoPageCard from "../components/InfoPageCard";
import InfoPageStack from "../components/InfoPageStack";
export default function StatusPage() {
const { data: buildinfo } = useSuspenseAPIQuery<Record<string, string>>({
@ -42,14 +44,8 @@ export default function StatusPage() {
};
return (
<Stack gap="lg" maw={1000} mx="auto" mt="xs">
<Card shadow="xs" withBorder p="md">
<Group wrap="nowrap" align="center" ml="xs" mb="sm" gap="xs">
<IconWall size={22} />
<Text fz="xl" fw={600}>
Build information
</Text>
</Group>
<InfoPageStack>
<InfoPageCard title="Build information" icon={IconWall}>
<Table layout="fixed">
<Table.Tbody>
{Object.entries(buildinfo.data).map(([k, v]) => (
@ -60,14 +56,8 @@ export default function StatusPage() {
))}
</Table.Tbody>
</Table>
</Card>
<Card shadow="xs" withBorder p="md">
<Group wrap="nowrap" align="center" ml="xs" mb="sm" gap="xs">
<IconRun size={22} />
<Text fz="xl" fw={600}>
Runtime information
</Text>
</Group>
</InfoPageCard>
<InfoPageCard title="Runtime information" icon={IconRun}>
<Table layout="fixed">
<Table.Tbody>
{Object.entries(runtimeinfo.data).map(([k, v]) => {
@ -84,7 +74,7 @@ export default function StatusPage() {
})}
</Table.Tbody>
</Table>
</Card>
</Stack>
</InfoPageCard>
</InfoPageStack>
);
}

View file

@ -1,8 +1,10 @@
import { Stack, Card, Table, Text } from "@mantine/core";
import { Table } from "@mantine/core";
import { useSuspenseAPIQuery } from "../api/api";
import { TSDBStatusResult } from "../api/responseTypes/tsdbStatus";
import { formatTimestamp } from "../lib/formatTime";
import { useSettings } from "../state/settingsSlice";
import InfoPageStack from "../components/InfoPageStack";
import InfoPageCard from "../components/InfoPageCard";
export default function TSDBStatusPage() {
const {
@ -41,7 +43,7 @@ export default function TSDBStatusPage() {
];
return (
<Stack gap="lg" maw={1000} mx="auto" mt="xs">
<InfoPageStack>
{[
{
title: "TSDB Head Status",
@ -70,10 +72,7 @@ export default function TSDBStatusPage() {
formatAsCode: true,
},
].map(({ title, unit = "Count", stats, formatAsCode }) => (
<Card shadow="xs" withBorder p="md">
<Text fz="xl" fw={600} ml="xs" mb="sm">
{title}
</Text>
<InfoPageCard title={title}>
<Table layout="fixed">
<Table.Thead>
<Table.Tr>
@ -82,24 +81,22 @@ export default function TSDBStatusPage() {
</Table.Tr>
</Table.Thead>
<Table.Tbody>
{stats.map(({ name, value }) => {
return (
<Table.Tr key={name}>
<Table.Td
style={{
wordBreak: "break-all",
}}
>
{formatAsCode ? <code>{name}</code> : name}
</Table.Td>
<Table.Td>{value}</Table.Td>
</Table.Tr>
);
})}
{stats.map(({ name, value }) => (
<Table.Tr key={name}>
<Table.Td
style={{
wordBreak: "break-all",
}}
>
{formatAsCode ? <code>{name}</code> : name}
</Table.Td>
<Table.Td>{value}</Table.Td>
</Table.Tr>
))}
</Table.Tbody>
</Table>
</Card>
</InfoPageCard>
))}
</Stack>
</InfoPageStack>
);
}

View file

@ -64,7 +64,7 @@ const DataTable: FC<DataTableProps> = ({
result.length > maxDisplayableSeries && (
<Alert
color="orange"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
title="Showing limited results"
>
Fetched {data.result.length} metrics, only displaying first{" "}
@ -76,10 +76,7 @@ const DataTable: FC<DataTableProps> = ({
)}
{!doFormat && (
<Alert
title="Formatting turned off"
icon={<IconInfoCircle size={14} />}
>
<Alert title="Formatting turned off" icon={<IconInfoCircle />}>
Showing more than {maxFormattableSeries} series, turning off label
formatting to improve rendering performance.
</Alert>
@ -166,7 +163,7 @@ const DataTable: FC<DataTableProps> = ({
<Alert
color="red"
title="Invalid query response"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
Invalid result value type
</Alert>

View file

@ -361,317 +361,296 @@ const VectorVectorBinaryExprExplainView: FC<
<>
<Text size="sm">{explanationText(node)}</Text>
{!isSetOperator(node.op) && (
<>
<Group my="lg" justify="flex-end" gap="xl">
{/* <Switch
label="Break long lines"
checked={allowLineBreaks}
onChange={(event) =>
setAllowLineBreaks(event.currentTarget.checked)
}
/> */}
<Switch
label="Show sample values"
checked={showSampleValues}
onChange={(event) =>
setShowSampleValues(event.currentTarget.checked)
}
/>
</Group>
<Group my="lg" justify="flex-end" gap="xl">
<Switch
label="Show sample values"
checked={showSampleValues}
onChange={(event) => setShowSampleValues(event.currentTarget.checked)}
/>
</Group>
{numGroups > Object.keys(matchGroups).length && (
<Alert
color="yellow"
mb="md"
icon={<IconAlertTriangle size={14} />}
>
Too many match groups to display, only showing{" "}
{Object.keys(matchGroups).length} out of {numGroups} groups.
<br />
<br />
<Anchor fz="sm" onClick={() => setMaxGroups(undefined)}>
Show all groups
</Anchor>
</Alert>
)}
{errCount > 0 && (
<Alert
color="yellow"
mb="md"
icon={<IconAlertTriangle size={14} />}
>
Found matching issues in {errCount} match group
{errCount > 1 ? "s" : ""}. See below for per-group error details.
</Alert>
)}
<Table fz="xs" withRowBorders={false}>
<Table.Tbody>
{Object.values(matchGroups).map((mg, mgIdx) => {
const {
groupLabels,
lhs,
lhsCount,
rhs,
rhsCount,
result,
error,
} = mg;
const matchGroupTitleRow = (color: string) => (
<Table.Tr ta="center">
<Table.Td
colSpan={2}
style={{ backgroundColor: `${color}25` }}
>
<SeriesName labels={groupLabels} format={true} />
</Table.Td>
</Table.Tr>
);
const matchGroupTable = (
series: InstantSample[],
seriesCount: number,
color: string,
colorOffset?: number
) => (
<Box
style={{
borderRadius: 3,
border: "2px solid",
borderColor:
series.length === 0
? "light-dark(var(--mantine-color-gray-4), var(--mantine-color-gray-7))"
: color,
}}
>
<Table fz="xs" withRowBorders={false} verticalSpacing={5}>
<Table.Tbody>
{series.length === 0 ? (
<Table.Tr>
<Table.Td
ta="center"
c="light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5))"
py="md"
fw="bold"
>
no matching series
</Table.Td>
</Table.Tr>
) : (
<>
{matchGroupTitleRow(color)}
{series.map((s, sIdx) => {
if (s.value === undefined) {
// TODO: Figure out how to handle native histograms.
throw new Error(
"Native histograms are not supported yet"
);
}
return (
<Table.Tr key={sIdx}>
<Table.Td>
<Group wrap="nowrap" gap={7} align="center">
{seriesSwatch(
colorForIndex(sIdx, colorOffset)
)}
<SeriesName
labels={noMatchLabels(
s.metric,
matching.on,
matching.labels
)}
format={true}
/>
</Group>
</Table.Td>
{showSampleValues && (
<Table.Td ta="right">{s.value[1]}</Table.Td>
)}
</Table.Tr>
);
})}
</>
)}
{seriesCount > series.length && (
<Table.Tr>
<Table.Td ta="center" py="md" fw="bold" c="gray.6">
{seriesCount - series.length} more series omitted
&nbsp;&nbsp;&nbsp;&nbsp;
<Anchor
size="xs"
onClick={() => setMaxSeriesPerGroup(undefined)}
>
Show all series
</Anchor>
</Table.Td>
</Table.Tr>
)}
</Table.Tbody>
</Table>
</Box>
);
const noLHSMatches = lhs.length === 0;
const noRHSMatches = rhs.length === 0;
const groupColor = colorPool[mgIdx % colorPool.length];
const lhsTable = matchGroupTable(lhs, lhsCount, groupColor);
const rhsTable = matchGroupTable(
rhs,
rhsCount,
groupColor,
rhsColorOffset
);
const resultTable = (
<Box
style={{
borderRadius: 3,
border: `2px solid`,
borderColor:
noLHSMatches || noRHSMatches || error !== null
? "light-dark(var(--mantine-color-gray-4), var(--mantine-color-gray-7))"
: groupColor,
}}
>
<Table fz="xs" withRowBorders={false} verticalSpacing={5}>
<Table.Tbody>
{noLHSMatches || noRHSMatches ? (
<Table.Tr>
<Table.Td
ta="center"
c="light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5))"
py="md"
fw="bold"
>
dropped
</Table.Td>
</Table.Tr>
) : error !== null ? (
<Table.Tr>
<Table.Td
ta="center"
c="light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5))"
py="md"
fw="bold"
>
error, result omitted
</Table.Td>
</Table.Tr>
) : (
<>
{result.map(({ sample, manySideIdx }, resIdx) => {
if (sample.value === undefined) {
// TODO: Figure out how to handle native histograms.
throw new Error(
"Native histograms are not supported yet"
);
}
const filtered =
sample.value[1] === filteredSampleValue;
const [lIdx, rIdx] =
matching.card ===
vectorMatchCardinality.oneToMany
? [0, manySideIdx]
: [manySideIdx, 0];
return (
<Table.Tr key={resIdx}>
<Table.Td
style={{ opacity: filtered ? 0.5 : 1 }}
title={
filtered
? "Series has been filtered by comparison operator"
: undefined
}
>
<Group
wrap="nowrap"
gap="xs"
align="flex-start"
>
<Group wrap="nowrap" gap={0}>
{seriesSwatch(colorForIndex(lIdx))}
<span style={{ color: "#aaa" }}></span>
{seriesSwatch(
colorForIndex(rIdx, rhsColorOffset)
)}
</Group>
<SeriesName
labels={sample.metric}
format={true}
/>
</Group>
</Table.Td>
{showSampleValues && (
<Table.Td ta="right">
{filtered ? (
<span style={{ color: "grey" }}>
filtered
</span>
) : (
<span>{sample.value[1]}</span>
)}
</Table.Td>
)}
</Table.Tr>
);
})}
</>
)}
</Table.Tbody>
</Table>
</Box>
);
return (
<React.Fragment key={mgIdx}>
{mgIdx !== 0 && <tr style={{ height: 30 }}></tr>}
<Table.Tr>
<Table.Td colSpan={5}>
{error && (
<Alert
color="red"
mb="md"
title="Error in match group below"
icon={<IconAlertTriangle size={14} />}
>
{explainError(node, mg, error)}
</Alert>
)}
</Table.Td>
</Table.Tr>
<Table.Tr>
<Table.Td valign="middle" p={0}>
{lhsTable}
</Table.Td>
<Table.Td ta="center">
{node.op}
{node.bool && " bool"}
</Table.Td>
<Table.Td valign="middle" p={0}>
{rhsTable}
</Table.Td>
<Table.Td ta="center">=</Table.Td>
<Table.Td valign="middle" p={0}>
{resultTable}
</Table.Td>
</Table.Tr>
</React.Fragment>
);
})}
</Table.Tbody>
</Table>
</>
{numGroups > Object.keys(matchGroups).length && (
<Alert color="yellow" mb="md" icon={<IconAlertTriangle />}>
Too many match groups to display, only showing{" "}
{Object.keys(matchGroups).length} out of {numGroups} groups.
<br />
<br />
<Anchor fz="sm" onClick={() => setMaxGroups(undefined)}>
Show all groups
</Anchor>
</Alert>
)}
{errCount > 0 && (
<Alert color="yellow" mb="md" icon={<IconAlertTriangle />}>
Found matching issues in {errCount} match group
{errCount > 1 ? "s" : ""}. See below for per-group error details.
</Alert>
)}
<Table fz="xs" withRowBorders={false}>
<Table.Tbody>
{Object.values(matchGroups).map((mg, mgIdx) => {
const { groupLabels, lhs, lhsCount, rhs, rhsCount, result, error } =
mg;
const matchGroupTitleRow = (color: string) => (
<Table.Tr ta="center">
<Table.Td colSpan={2} style={{ backgroundColor: `${color}25` }}>
<SeriesName labels={groupLabels} format={true} />
</Table.Td>
</Table.Tr>
);
const matchGroupTable = (
series: InstantSample[],
seriesCount: number,
color: string,
colorOffset?: number
) => (
<Box
style={{
borderRadius: 3,
border: "2px solid",
borderColor:
seriesCount === 0
? "light-dark(var(--mantine-color-gray-4), var(--mantine-color-gray-7))"
: color,
}}
>
<Table fz="xs" withRowBorders={false} verticalSpacing={5}>
<Table.Tbody>
{seriesCount === 0 ? (
<Table.Tr>
<Table.Td
ta="center"
c="light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5))"
py="md"
fw="bold"
>
no matching series
</Table.Td>
</Table.Tr>
) : (
<>
{matchGroupTitleRow(color)}
{series.map((s, sIdx) => {
if (s.value === undefined) {
// TODO: Figure out how to handle native histograms.
throw new Error(
"Native histograms are not supported yet"
);
}
return (
<Table.Tr key={sIdx}>
<Table.Td>
<Group wrap="nowrap" gap={7} align="center">
{seriesSwatch(
colorForIndex(sIdx, colorOffset)
)}
<SeriesName
labels={noMatchLabels(
s.metric,
matching.on,
matching.labels
)}
format={true}
/>
</Group>
</Table.Td>
{showSampleValues && (
<Table.Td ta="right">{s.value[1]}</Table.Td>
)}
</Table.Tr>
);
})}
</>
)}
{seriesCount > series.length && (
<Table.Tr>
<Table.Td ta="center" py="md" fw="bold" c="gray.6">
{seriesCount - series.length} more series omitted
&nbsp;&nbsp;&nbsp;&nbsp;
<Anchor
size="xs"
onClick={() => setMaxSeriesPerGroup(undefined)}
>
Show all series
</Anchor>
</Table.Td>
</Table.Tr>
)}
</Table.Tbody>
</Table>
</Box>
);
const groupColor = colorPool[mgIdx % colorPool.length];
const lhsTable = matchGroupTable(lhs, lhsCount, groupColor);
const rhsTable = matchGroupTable(
rhs,
rhsCount,
groupColor,
rhsColorOffset
);
const resultTable = (
<Box
style={{
borderRadius: 3,
border: `2px solid`,
borderColor:
result.length === 0 || error !== null
? "light-dark(var(--mantine-color-gray-4), var(--mantine-color-gray-7))"
: groupColor,
}}
>
<Table fz="xs" withRowBorders={false} verticalSpacing={5}>
<Table.Tbody>
{error !== null ? (
<Table.Tr>
<Table.Td
ta="center"
c="light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5))"
py="md"
fw="bold"
>
error, result omitted
</Table.Td>
</Table.Tr>
) : result.length === 0 ? (
<Table.Tr>
<Table.Td
ta="center"
c="light-dark(var(--mantine-color-gray-7), var(--mantine-color-gray-5))"
py="md"
fw="bold"
>
dropped
</Table.Td>
</Table.Tr>
) : (
<>
{result.map(({ sample, manySideIdx }, resIdx) => {
if (sample.value === undefined) {
// TODO: Figure out how to handle native histograms.
throw new Error(
"Native histograms are not supported yet"
);
}
const filtered =
sample.value[1] === filteredSampleValue;
const [lIdx, rIdx] =
matching.card === vectorMatchCardinality.oneToMany
? [0, manySideIdx]
: [manySideIdx, 0];
return (
<Table.Tr key={resIdx}>
<Table.Td
style={{ opacity: filtered ? 0.5 : 1 }}
title={
filtered
? "Series has been filtered by comparison operator"
: undefined
}
>
<Group
wrap="nowrap"
gap="xs"
align="flex-start"
>
<Group wrap="nowrap" gap={0}>
{seriesSwatch(colorForIndex(lIdx))}
<span style={{ color: "#aaa" }}></span>
{seriesSwatch(
colorForIndex(rIdx, rhsColorOffset)
)}
</Group>
<SeriesName
labels={sample.metric}
format={true}
/>
</Group>
</Table.Td>
{showSampleValues && (
<Table.Td ta="right">
{filtered ? (
<span style={{ color: "grey" }}>
filtered
</span>
) : (
<span>{sample.value[1]}</span>
)}
</Table.Td>
)}
</Table.Tr>
);
})}
</>
)}
</Table.Tbody>
</Table>
</Box>
);
return (
<React.Fragment key={mgIdx}>
{mgIdx !== 0 && <tr style={{ height: 30 }}></tr>}
<Table.Tr>
<Table.Td colSpan={5}>
{error && (
<Alert
color="red"
mb="md"
title="Error in match group below"
icon={<IconAlertTriangle />}
>
{explainError(node, mg, error)}
</Alert>
)}
</Table.Td>
</Table.Tr>
<Table.Tr>
<Table.Td valign="middle" p={0}>
{lhsTable}
</Table.Td>
<Table.Td
ta="center"
fw={isSetOperator(node.op) ? "bold" : undefined}
>
{node.op}
{node.bool && " bool"}
</Table.Td>
<Table.Td valign="middle" p={0}>
{rhsTable}
</Table.Td>
<Table.Td ta="center">=</Table.Td>
<Table.Td valign="middle" p={0}>
{resultTable}
</Table.Td>
</Table.Tr>
</React.Fragment>
);
})}
</Table.Tbody>
</Table>
</>
);
};

View file

@ -7,7 +7,6 @@ import {
Loader,
Menu,
Modal,
rem,
Skeleton,
useComputedColorScheme,
} from "@mantine/core";
@ -70,6 +69,7 @@ import { useSettings } from "../../state/settingsSlice";
import MetricsExplorer from "./MetricsExplorer/MetricsExplorer";
import ErrorBoundary from "../../components/ErrorBoundary";
import { useAppSelector } from "../../state/hooks";
import { inputIconStyle, menuIconStyle } from "../../styles";
const promqlExtension = new PromQLExtension();
@ -224,25 +224,19 @@ const ExpressionInput: FC<ExpressionInputProps> = ({
color="gray"
aria-label="Show query options"
>
<IconDotsVertical style={{ width: "1rem", height: "1rem" }} />
<IconDotsVertical style={inputIconStyle} />
</ActionIcon>
</Menu.Target>
<Menu.Dropdown>
<Menu.Label>Query options</Menu.Label>
<Menu.Item
leftSection={
<IconSearch style={{ width: rem(14), height: rem(14) }} />
}
leftSection={<IconSearch style={menuIconStyle} />}
onClick={() => setShowMetricsExplorer(true)}
>
Explore metrics
</Menu.Item>
<Menu.Item
leftSection={
<IconAlignJustified
style={{ width: rem(14), height: rem(14) }}
/>
}
leftSection={<IconAlignJustified style={menuIconStyle} />}
onClick={() => formatQuery()}
disabled={
isFormatting || expr === "" || expr === formatResult?.data
@ -251,18 +245,14 @@ const ExpressionInput: FC<ExpressionInputProps> = ({
Format expression
</Menu.Item>
<Menu.Item
leftSection={
<IconBinaryTree style={{ width: rem(14), height: rem(14) }} />
}
leftSection={<IconBinaryTree style={menuIconStyle} />}
onClick={() => setShowTree(!treeShown)}
>
{treeShown ? "Hide" : "Show"} tree view
</Menu.Item>
<Menu.Item
color="red"
leftSection={
<IconTrash style={{ width: rem(14), height: rem(14) }} />
}
leftSection={<IconTrash style={menuIconStyle} />}
onClick={removePanel}
>
Remove query

View file

@ -131,7 +131,7 @@ const Graph: FC<GraphProps> = ({
<Alert
color="red"
title="Error executing query"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
{error.message}
</Alert>
@ -146,7 +146,7 @@ const Graph: FC<GraphProps> = ({
if (result.length === 0) {
return (
<Alert title="Empty query result" icon={<IconInfoCircle size={14} />}>
<Alert title="Empty query result" icon={<IconInfoCircle />}>
This query returned no data.
</Alert>
);
@ -158,7 +158,7 @@ const Graph: FC<GraphProps> = ({
<Alert
color="orange"
title="Graphing modified expression"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
<strong>Note:</strong> Range vector selectors can't be graphed, so
graphing the equivalent instant vector selector instead.

View file

@ -37,6 +37,7 @@ import {
} from "@tabler/icons-react";
import { formatNode } from "../../../promql/format";
import classes from "./LabelsExplorer.module.css";
import { buttonIconStyle } from "../../../styles";
type LabelsExplorerProps = {
metricName: string;
@ -150,7 +151,7 @@ const LabelsExplorer: FC<LabelsExplorerProps> = ({
<Alert
color="red"
title="Error querying series"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
<strong>Error:</strong> {error.message}
</Alert>
@ -177,7 +178,7 @@ const LabelsExplorer: FC<LabelsExplorerProps> = ({
variant="light"
size="xs"
onClick={() => insertText(serializeNode(selector))}
leftSection={<IconCodePlus size={18} />}
leftSection={<IconCodePlus style={buttonIconStyle} />}
title="Insert selector at cursor and close explorer"
>
Insert
@ -188,7 +189,11 @@ const LabelsExplorer: FC<LabelsExplorerProps> = ({
variant="light"
size="xs"
leftSection={
copied ? <IconCheck size={18} /> : <IconCopy size={18} />
copied ? (
<IconCheck style={buttonIconStyle} />
) : (
<IconCopy style={buttonIconStyle} />
)
}
onClick={copy}
title="Copy selector to clipboard"
@ -228,7 +233,7 @@ const LabelsExplorer: FC<LabelsExplorerProps> = ({
variant="light"
size="xs"
onClick={hideLabelsExplorer}
leftSection={<IconArrowLeft size={18} />}
leftSection={<IconArrowLeft style={buttonIconStyle} />}
>
Back to all metrics
</Button>

View file

@ -99,7 +99,7 @@ const MetricsExplorer: FC<MetricsExplorerProps> = ({
{items.map((m) => (
<Table.Tr key={m.original}>
<Table.Td>
<Group justify="space-between">
<Group justify="space-between" wrap="nowrap">
{debouncedFilterText === "" ? (
m.original
) : (

View file

@ -1,4 +1,4 @@
import { Alert, Box, Button, Stack, rem } from "@mantine/core";
import { Alert, Box, Button, Stack } from "@mantine/core";
import {
IconAlertCircle,
IconAlertTriangle,
@ -17,6 +17,7 @@ import { useEffect, useState } from "react";
import { InstantQueryResult } from "../../api/responseTypes/query";
import { humanizeDuration } from "../../lib/formatTime";
import { decodePanelOptionsFromURLParams } from "./urlStateEncoding";
import { buttonIconStyle } from "../../styles";
export default function QueryPage() {
const panels = useAppSelector((state) => state.queryPage.panels);
@ -80,9 +81,7 @@ export default function QueryPage() {
{metricNamesError && (
<Alert
mb="sm"
icon={
<IconAlertTriangle style={{ width: rem(14), height: rem(14) }} />
}
icon={<IconAlertTriangle />}
color="red"
title="Error fetching metrics list"
withCloseButton
@ -93,9 +92,7 @@ export default function QueryPage() {
{timeError && (
<Alert
mb="sm"
icon={
<IconAlertTriangle style={{ width: rem(14), height: rem(14) }} />
}
icon={<IconAlertTriangle />}
color="red"
title="Error fetching server time"
withCloseButton
@ -108,7 +105,7 @@ export default function QueryPage() {
mb="sm"
title="Server time is out of sync"
color="red"
icon={<IconAlertCircle style={{ width: rem(14), height: rem(14) }} />}
icon={<IconAlertCircle />}
onClose={() => setTimeDelta(0)}
>
Detected a time difference of{" "}
@ -131,7 +128,7 @@ export default function QueryPage() {
<Button
variant="light"
mt="xl"
leftSection={<IconPlus size={18} />}
leftSection={<IconPlus style={buttonIconStyle} />}
onClick={() => dispatch(addPanel())}
>
Add query

View file

@ -80,7 +80,7 @@ const TableTab: FC<TableTabProps> = ({ panelIdx, retriggerIdx, expr }) => {
<Alert
color="red"
title="Error executing query"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
{error.message}
</Alert>
@ -89,10 +89,7 @@ const TableTab: FC<TableTabProps> = ({ panelIdx, retriggerIdx, expr }) => {
) : (
<>
{data.data.result.length === 0 && (
<Alert
title="Empty query result"
icon={<IconInfoCircle size={14} />}
>
<Alert title="Empty query result" icon={<IconInfoCircle />}>
This query returned no data.
</Alert>
)}
@ -102,7 +99,7 @@ const TableTab: FC<TableTabProps> = ({ panelIdx, retriggerIdx, expr }) => {
key={idx}
color="red"
title="Query warning"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
{w}
</Alert>
@ -113,7 +110,7 @@ const TableTab: FC<TableTabProps> = ({ panelIdx, retriggerIdx, expr }) => {
key={idx}
color="yellow"
title="Query notice"
icon={<IconInfoCircle size={14} />}
icon={<IconInfoCircle />}
>
{w}
</Alert>

View file

@ -4,7 +4,6 @@ import {
useEffect,
useLayoutEffect,
useMemo,
useRef,
useState,
} from "react";
import ASTNode, { nodeType } from "../../promql/ast";
@ -17,6 +16,7 @@ import {
Group,
List,
Loader,
rem,
Text,
Tooltip,
} from "@mantine/core";
@ -37,6 +37,8 @@ const nodeIndent = 20;
const maxLabelNames = 10;
const maxLabelValues = 10;
const nodeIndicatorIconStyle = { width: rem(18), height: rem(18) };
type NodeState = "waiting" | "running" | "error" | "success";
const mergeChildStates = (states: NodeState[]): NodeState => {
@ -57,7 +59,7 @@ const TreeNode: FC<{
node: ASTNode;
selectedNode: { id: string; node: ASTNode } | null;
setSelectedNode: (Node: { id: string; node: ASTNode } | null) => void;
parentRef?: React.RefObject<HTMLDivElement>;
parentEl?: HTMLDivElement | null;
reportNodeState?: (childIdx: number, state: NodeState) => void;
reverse: boolean;
// The index of this node in its parent's children.
@ -66,13 +68,21 @@ const TreeNode: FC<{
node,
selectedNode,
setSelectedNode,
parentRef,
parentEl,
reportNodeState,
reverse,
childIdx,
}) => {
const nodeID = useId();
const nodeRef = useRef<HTMLDivElement>(null);
// A normal ref won't work properly here because the ref's `current` property
// going from `null` to defined won't trigger a re-render of the child
// component, since it's not a React state update. So we have to manually
// create a state update using a callback ref. See also
// https://tkdodo.eu/blog/avoiding-use-effect-with-callback-refs
const [nodeEl, setNodeEl] = useState<HTMLDivElement | null>(null);
const nodeRef = useCallback((node: HTMLDivElement) => setNodeEl(node), []);
const [connectorStyle, setConnectorStyle] = useState<CSSProperties>({
borderColor:
"light-dark(var(--mantine-color-gray-4), var(--mantine-color-dark-3))",
@ -94,10 +104,10 @@ const TreeNode: FC<{
// Select the node when it is mounted and it is the root of the tree.
useEffect(() => {
if (parentRef === undefined) {
if (parentEl === undefined) {
setSelectedNode({ id: nodeID, node: node });
}
}, [parentRef, setSelectedNode, nodeID, node]);
}, [parentEl, setSelectedNode, nodeID, node]);
// Deselect node when node is unmounted.
useEffect(() => {
@ -170,16 +180,18 @@ const TreeNode: FC<{
// Update the size and position of tree connector lines based on the node's and its parent's position.
useLayoutEffect(() => {
if (parentRef === undefined) {
if (parentEl === undefined) {
// We're the root node.
return;
}
if (parentRef.current === null || nodeRef.current === null) {
if (parentEl === null || nodeEl === null) {
// Either of the two connected nodes hasn't been rendered yet.
return;
}
const parentRect = parentRef.current.getBoundingClientRect();
const nodeRect = nodeRef.current.getBoundingClientRect();
const parentRect = parentEl.getBoundingClientRect();
const nodeRect = nodeEl.getBoundingClientRect();
if (reverse) {
setConnectorStyle((prevStyle) => ({
...prevStyle,
@ -199,7 +211,7 @@ const TreeNode: FC<{
borderTopLeftRadius: undefined,
}));
}
}, [parentRef, reverse, nodeRef, setConnectorStyle]);
}, [parentEl, nodeEl, reverse, nodeRef, setConnectorStyle]);
// Update the node info state based on the query result.
useEffect(() => {
@ -261,7 +273,7 @@ const TreeNode: FC<{
pos="relative"
align="center"
>
{parentRef && (
{parentEl !== undefined && (
// Connector line between this node and its parent.
<Box pos="absolute" display="inline-block" style={connectorStyle} />
)}
@ -288,13 +300,14 @@ const TreeNode: FC<{
</Box>
{mergedChildState === "waiting" ? (
<Group c="gray">
<IconPointFilled size={18} />
<IconPointFilled style={nodeIndicatorIconStyle} />
</Group>
) : mergedChildState === "running" ? (
<Loader size={14} color="gray" type="dots" />
) : mergedChildState === "error" ? (
<Group c="orange.7" gap={5} fz="xs" wrap="nowrap">
<IconPointFilled size={18} /> Blocked on child query error
<IconPointFilled style={nodeIndicatorIconStyle} /> Blocked on child
query error
</Group>
) : isFetching ? (
<Loader size={14} color="gray" />
@ -305,7 +318,7 @@ const TreeNode: FC<{
style={{ flexShrink: 0 }}
className={classes.errorText}
>
<IconPointFilled size={18} />
<IconPointFilled style={nodeIndicatorIconStyle} />
<Text fz="xs">
<strong>Error executing query:</strong> {error.message}
</Text>
@ -387,7 +400,7 @@ const TreeNode: FC<{
node={children[0]}
selectedNode={selectedNode}
setSelectedNode={setSelectedNode}
parentRef={nodeRef}
parentEl={nodeEl}
reverse={true}
childIdx={0}
reportNodeState={childReportNodeState}
@ -399,7 +412,7 @@ const TreeNode: FC<{
node={children[1]}
selectedNode={selectedNode}
setSelectedNode={setSelectedNode}
parentRef={nodeRef}
parentEl={nodeEl}
reverse={false}
childIdx={1}
reportNodeState={childReportNodeState}
@ -418,7 +431,7 @@ const TreeNode: FC<{
node={child}
selectedNode={selectedNode}
setSelectedNode={setSelectedNode}
parentRef={nodeRef}
parentEl={nodeEl}
reverse={false}
childIdx={idx}
reportNodeState={childReportNodeState}

View file

@ -30,6 +30,7 @@ import {
} from "../../state/serviceDiscoveryPageSlice";
import { StateMultiSelect } from "../../components/StateMultiSelect";
import badgeClasses from "../../Badge.module.css";
import { expandIconStyle, inputIconStyle } from "../../styles";
export const targetPoolDisplayLimit = 20;
@ -98,7 +99,7 @@ export default function ServiceDiscoveryPage() {
/>
<TextInput
flex={1}
leftSection={<IconSearch size={14} />}
leftSection={<IconSearch style={inputIconStyle} />}
placeholder="Filter by labels"
value={searchFilter || ""}
onChange={(event) => setSearchFilter(event.currentTarget.value)}
@ -118,9 +119,9 @@ export default function ServiceDiscoveryPage() {
}
>
{collapsedPools.length > 0 ? (
<IconLayoutNavbarExpand size={16} />
<IconLayoutNavbarExpand style={expandIconStyle} />
) : (
<IconLayoutNavbarCollapse size={16} />
<IconLayoutNavbarCollapse style={expandIconStyle} />
)}
</ActionIcon>
</Group>

View file

@ -204,10 +204,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
return (
<Stack>
{allPoolNames.length === 0 ? (
<Alert
title="No scrape pools found"
icon={<IconInfoCircle size={14} />}
>
<Alert title="No scrape pools found" icon={<IconInfoCircle />}>
No scrape pools found.
</Alert>
) : (
@ -215,7 +212,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
allPoolNames.length !== shownPoolNames.length && (
<Alert
title="Hiding pools with no matching targets"
icon={<IconInfoCircle size={14} />}
icon={<IconInfoCircle />}
>
Hiding {allPoolNames.length - shownPoolNames.length} empty pools due
to filters or no targets.
@ -228,7 +225,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
{showLimitAlert && (
<Alert
title="Found many pools, showing only one"
icon={<IconInfoCircle size={14} />}
icon={<IconInfoCircle />}
withCloseButton
onClose={() => dispatch(setShowLimitAlert(false))}
>

View file

@ -39,6 +39,7 @@ import TargetLabels from "./TargetLabels";
import { useDebouncedValue } from "@mantine/hooks";
import { targetPoolDisplayLimit } from "./TargetsPage";
import { BooleanParam, useQueryParam, withDefault } from "use-query-params";
import { badgeIconStyle } from "../../styles";
type ScrapePool = {
targets: Target[];
@ -194,10 +195,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
return (
<Stack>
{allPoolNames.length === 0 ? (
<Alert
title="No scrape pools found"
icon={<IconInfoCircle size={14} />}
>
<Alert title="No scrape pools found" icon={<IconInfoCircle />}>
No scrape pools found.
</Alert>
) : (
@ -205,7 +203,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
allPoolNames.length !== shownPoolNames.length && (
<Alert
title="Hiding pools with no matching targets"
icon={<IconInfoCircle size={14} />}
icon={<IconInfoCircle />}
>
Hiding {allPoolNames.length - shownPoolNames.length} empty pools due
to filters or no targets.
@ -218,7 +216,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
{showLimitAlert && (
<Alert
title="Found many pools, showing only one"
icon={<IconInfoCircle size={14} />}
icon={<IconInfoCircle />}
withCloseButton
onClose={() => dispatch(setShowLimitAlert(false))}
>
@ -307,10 +305,9 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
<Table>
<Table.Thead>
<Table.Tr>
<Table.Th w="30%">Endpoint</Table.Th>
<Table.Th w="25%">Endpoint</Table.Th>
<Table.Th>Labels</Table.Th>
<Table.Th w="10%">Last scrape</Table.Th>
{/* <Table.Th w="10%">Scrape duration</Table.Th> */}
<Table.Th w={230}>Last scrape</Table.Th>
<Table.Th w={100}>State</Table.Th>
</Table.Tr>
</Table.Thead>
@ -339,23 +336,20 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
/>
</Table.Td>
<Table.Td valign="top">
<Group
gap="xs"
wrap="wrap"
justify="space-between"
>
<Group gap="xs" wrap="wrap">
<Tooltip
label="Last target scrape"
withArrow
>
<Badge
w="max-content"
variant="light"
className={badgeClasses.statsBadge}
styles={{
label: { textTransform: "none" },
}}
leftSection={<IconRefresh size={12} />}
leftSection={
<IconRefresh style={badgeIconStyle} />
}
>
{humanizeDurationRelative(
target.lastScrape,
@ -369,14 +363,15 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
withArrow
>
<Badge
w="max-content"
variant="light"
className={badgeClasses.statsBadge}
styles={{
label: { textTransform: "none" },
}}
leftSection={
<IconHourglass size={12} />
<IconHourglass
style={badgeIconStyle}
/>
}
>
{humanizeDuration(
@ -388,7 +383,6 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
</Table.Td>
<Table.Td valign="top">
<Badge
w="max-content"
className={healthBadgeClass(target.health)}
>
{target.health}
@ -401,7 +395,7 @@ const ScrapePoolList: FC<ScrapePoolListProp> = ({
<Alert
color="red"
mb="sm"
icon={<IconAlertTriangle size={14} />}
icon={<IconAlertTriangle />}
>
<strong>Error scraping target:</strong>{" "}
{target.lastError}

View file

@ -4,6 +4,7 @@ import { LabelBadges } from "../../components/LabelBadges";
import { ActionIcon, Collapse, Group, Stack, Text } from "@mantine/core";
import { useDisclosure } from "@mantine/hooks";
import { IconChevronDown, IconChevronUp } from "@tabler/icons-react";
import { actionIconStyle } from "../../styles";
type TargetLabelsProps = {
labels: Labels;
@ -26,12 +27,9 @@ const TargetLabels: FC<TargetLabelsProps> = ({ discoveredLabels, labels }) => {
title={`${showDiscovered ? "Hide" : "Show"} discovered (pre-relabeling) labels`}
>
{showDiscovered ? (
<IconChevronUp
style={{ width: "70%", height: "70%" }}
stroke={1.5}
/>
<IconChevronUp style={actionIconStyle} />
) : (
<IconChevronDown style={{ width: "70%", height: "70%" }} />
<IconChevronDown style={actionIconStyle} />
)}
</ActionIcon>
</Group>

View file

@ -29,6 +29,7 @@ import ErrorBoundary from "../../components/ErrorBoundary";
import ScrapePoolList from "./ScrapePoolsList";
import { useSuspenseAPIQuery } from "../../api/api";
import { ScrapePoolsResult } from "../../api/responseTypes/scrapePools";
import { expandIconStyle, inputIconStyle } from "../../styles";
export const targetPoolDisplayLimit = 20;
@ -101,7 +102,7 @@ export default function TargetsPage() {
/>
<TextInput
flex={1}
leftSection={<IconSearch size={14} />}
leftSection={<IconSearch style={inputIconStyle} />}
placeholder="Filter by endpoint or labels"
value={searchFilter || ""}
onChange={(event) =>
@ -123,9 +124,9 @@ export default function TargetsPage() {
}
>
{collapsedPools.length > 0 ? (
<IconLayoutNavbarExpand size={16} />
<IconLayoutNavbarExpand style={expandIconStyle} />
) : (
<IconLayoutNavbarCollapse size={16} />
<IconLayoutNavbarCollapse style={expandIconStyle} />
)}
</ActionIcon>
</Group>

View file

@ -73,6 +73,7 @@ const testMetricC: InstantSample[] = [
const testCases: TestCase[] = [
{
// metric_a - metric_b
desc: "one-to-one matching on all labels",
op: binaryOperatorType.sub,
matching: {
@ -238,6 +239,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a - on(label1, label2) metric_b
desc: "one-to-one matching on explicit labels",
op: binaryOperatorType.sub,
matching: {
@ -403,6 +405,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a - ignoring(same) metric_b
desc: "one-to-one matching ignoring explicit labels",
op: binaryOperatorType.sub,
matching: {
@ -568,6 +571,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_b - metric_c
desc: "many-to-one matching with no matching labels specified (empty output)",
op: binaryOperatorType.sub,
matching: {
@ -689,6 +693,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_b - on(label1) metric_c
desc: "many-to-one matching with matching labels specified, but no group_left (error)",
op: binaryOperatorType.sub,
matching: {
@ -778,6 +783,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_b - on(label1) group_left metric_c
desc: "many-to-one matching with matching labels specified and group_left",
op: binaryOperatorType.sub,
matching: {
@ -891,6 +897,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_c - on(label1) group_right metric_b
desc: "one-to-many matching with matching labels specified and group_right",
op: binaryOperatorType.sub,
matching: {
@ -1004,6 +1011,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_c - on(label1) group_left metric_b
desc: "one-to-many matching with matching labels specified but incorrect group_left (error)",
op: binaryOperatorType.sub,
matching: {
@ -1091,6 +1099,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a - on(label1) metric_b
desc: "insufficient matching labels leading to many-to-many matching for intended one-to-one match (error)",
op: binaryOperatorType.sub,
matching: {
@ -1206,6 +1215,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a < metric_b
desc: "filter op keeping all series",
op: binaryOperatorType.lss,
matching: {
@ -1391,6 +1401,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a >= metric_b
desc: "filter op dropping all series",
op: binaryOperatorType.gte,
matching: {
@ -1576,6 +1587,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a >= bool metric_b
desc: "filter op dropping all series, but with bool",
op: binaryOperatorType.gte,
bool: true,
@ -1742,6 +1754,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a < bool metric_b
desc: "filter op keeping all series, but with bool",
op: binaryOperatorType.lss,
bool: true,
@ -1908,6 +1921,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_a - metric_b
desc: "exceeding the match group limit",
op: binaryOperatorType.sub,
matching: {
@ -2000,6 +2014,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_c - on(label1) group_left metric_b
desc: "exceeding the per-group series limit",
op: binaryOperatorType.sub,
matching: {
@ -2082,6 +2097,7 @@ const testCases: TestCase[] = [
},
},
{
// metric_c - on(label1) group_left metric_b
desc: "exceeding both group limit and per-group series limit",
op: binaryOperatorType.sub,
matching: {
@ -2131,6 +2147,732 @@ const testCases: TestCase[] = [
numGroups: 2,
},
},
{
// metric_a and metric_b
desc: "and operator with no matching labels and matching groups",
op: binaryOperatorType.and,
matching: {
card: vectorMatchCardinality.manyToMany,
on: false,
include: [],
labels: [],
},
lhs: testMetricA,
rhs: testMetricB,
result: {
groups: {
[fnv1a(["a", "x", "same"])]: {
groupLabels: { label1: "a", label2: "x", same: "same" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "10"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
manySideIdx: 0,
},
],
error: null,
},
[fnv1a(["a", "y", "same"])]: {
groupLabels: { label1: "a", label2: "y", same: "same" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "20"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
manySideIdx: 0,
},
],
error: null,
},
[fnv1a(["b", "x", "same"])]: {
groupLabels: { label1: "b", label2: "x", same: "same" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "30"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
manySideIdx: 0,
},
],
error: null,
},
[fnv1a(["b", "y", "same"])]: {
groupLabels: { label1: "b", label2: "y", same: "same" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "4"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "40"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "4"],
},
manySideIdx: 0,
},
],
error: null,
},
},
numGroups: 4,
},
},
{
// metric_a[0...2] and on(label1) metric_b[1...3]
desc: "and operator with matching label and series on each side",
op: binaryOperatorType.and,
matching: {
card: vectorMatchCardinality.manyToMany,
on: true,
include: [],
labels: ["label1"],
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
result: {
groups: {
[fnv1a(["a"])]: {
groupLabels: { label1: "a" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
],
lhsCount: 2,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "20"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
manySideIdx: 0,
},
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
manySideIdx: 1,
},
],
error: null,
},
[fnv1a(["b"])]: {
groupLabels: { label1: "b" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "30"],
},
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "40"],
},
],
rhsCount: 2,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
manySideIdx: 0,
},
],
error: null,
},
},
numGroups: 2,
},
},
{
// metric_a[0...2] unless on(label1) metric_b[1...3]
desc: "unless operator with matching label and series on each side",
op: binaryOperatorType.unless,
matching: {
card: vectorMatchCardinality.manyToMany,
on: true,
include: [],
labels: ["label1"],
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
result: {
groups: {
[fnv1a(["a"])]: {
groupLabels: { label1: "a" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
],
lhsCount: 2,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "20"],
},
],
rhsCount: 1,
result: [],
error: null,
},
[fnv1a(["b"])]: {
groupLabels: { label1: "b" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "30"],
},
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "40"],
},
],
rhsCount: 2,
result: [],
error: null,
},
},
numGroups: 2,
},
},
{
// metric_a[0...2] or on(label1) metric_b[1...3]
desc: "or operator with matching label and series on each side",
op: binaryOperatorType.or,
matching: {
card: vectorMatchCardinality.manyToMany,
on: true,
include: [],
labels: ["label1"],
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
result: {
groups: {
[fnv1a(["a"])]: {
groupLabels: { label1: "a" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
],
lhsCount: 2,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "20"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
manySideIdx: 0,
},
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
manySideIdx: 1,
},
],
error: null,
},
[fnv1a(["b"])]: {
groupLabels: { label1: "b" },
lhs: [
{
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "30"],
},
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "40"],
},
],
rhsCount: 2,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
manySideIdx: 0,
},
],
error: null,
},
},
numGroups: 2,
},
},
{
// metric_a[0...2] or metric_b[1...3]
desc: "or operator with only partial overlap",
op: binaryOperatorType.or,
matching: {
card: vectorMatchCardinality.manyToMany,
on: false,
include: [],
labels: [],
},
lhs: testMetricA.slice(0, 3),
rhs: testMetricB.slice(1, 4),
result: {
groups: {
[fnv1a(["a", "x", "same"])]: {
groupLabels: {
label1: "a",
label2: "x",
same: "same",
},
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
],
lhsCount: 1,
rhs: [],
rhsCount: 0,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "x",
same: "same",
},
value: [0, "1"],
},
manySideIdx: 0,
},
],
error: null,
},
[fnv1a(["a", "y", "same"])]: {
groupLabels: {
label1: "a",
label2: "y",
same: "same",
},
lhs: [
{
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "20"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "a",
label2: "y",
same: "same",
},
value: [0, "2"],
},
manySideIdx: 0,
},
],
error: null,
},
[fnv1a(["b", "x", "same"])]: {
groupLabels: {
label1: "b",
label2: "x",
same: "same",
},
lhs: [
{
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
],
lhsCount: 1,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "30"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_a",
label1: "b",
label2: "x",
same: "same",
},
value: [0, "3"],
},
manySideIdx: 0,
},
],
error: null,
},
[fnv1a(["b", "y", "same"])]: {
groupLabels: {
label1: "b",
label2: "y",
same: "same",
},
lhs: [],
lhsCount: 0,
rhs: [
{
metric: {
__name__: "metric_b",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "40"],
},
],
rhsCount: 1,
result: [
{
sample: {
metric: {
__name__: "metric_b",
label1: "b",
label2: "y",
same: "same",
},
value: [0, "40"],
},
manySideIdx: 0,
},
],
error: null,
},
},
numGroups: 4,
},
},
];
describe("binOp", () => {

View file

@ -8,7 +8,7 @@ import {
vectorMatchCardinality,
VectorMatching,
} from "./ast";
import { isComparisonOperator } from "./utils";
import { isComparisonOperator, isSetOperator } from "./utils";
// We use a special (otherwise invalid) sample value to indicate that
// a sample has been filtered away by a comparison operator.
@ -340,20 +340,35 @@ export const computeVectorVectorBinOp = (
// Annotate the match groups with errors (if any) and populate the results.
Object.values(groups).forEach((mg) => {
if (matching.card === vectorMatchCardinality.oneToOne) {
if (mg.lhs.length > 1 && mg.rhs.length > 1) {
mg.error = { type: MatchErrorType.multipleMatchesOnBothSides };
} else if (mg.lhs.length > 1 || mg.rhs.length > 1) {
mg.error = {
type: MatchErrorType.multipleMatchesForOneToOneMatching,
dupeSide: mg.lhs.length > 1 ? "left" : "right",
};
}
} else if (mg.rhs.length > 1) {
// Check for dupes on the "one" side in one-to-many or many-to-one matching.
mg.error = {
type: MatchErrorType.multipleMatchesOnOneSide,
};
switch (matching.card) {
case vectorMatchCardinality.oneToOne:
if (mg.lhs.length > 1 && mg.rhs.length > 1) {
mg.error = { type: MatchErrorType.multipleMatchesOnBothSides };
} else if (mg.lhs.length > 1 || mg.rhs.length > 1) {
mg.error = {
type: MatchErrorType.multipleMatchesForOneToOneMatching,
dupeSide: mg.lhs.length > 1 ? "left" : "right",
};
}
break;
case vectorMatchCardinality.oneToMany:
case vectorMatchCardinality.manyToOne:
if (mg.rhs.length > 1) {
mg.error = {
type: MatchErrorType.multipleMatchesOnOneSide,
};
}
break;
case vectorMatchCardinality.manyToMany:
// Should be a set operator - these don't have errors that aren't caught during parsing.
if (!isSetOperator(op)) {
throw new Error(
"unexpected many-to-many matching for non-set operator"
);
}
break;
default:
throw new Error("unknown vector matching cardinality");
}
if (mg.error) {
@ -363,42 +378,79 @@ export const computeVectorVectorBinOp = (
return;
}
// Calculate the results for this match group.
mg.rhs.forEach((rs) => {
if (isSetOperator(op)) {
// Add LHS samples to the result, depending on specific operator condition and RHS length.
mg.lhs.forEach((ls, lIdx) => {
if (!ls.value || !rs.value) {
// TODO: Implement native histogram support.
throw new Error("native histogram support not implemented yet");
if (
(op === binaryOperatorType.and && mg.rhs.length > 0) ||
(op === binaryOperatorType.unless && mg.rhs.length === 0) ||
op === binaryOperatorType.or
) {
mg.result.push({
sample: {
metric: ls.metric,
value: ls.value,
},
manySideIdx: lIdx,
});
}
});
const [vl, vr] =
matching.card !== vectorMatchCardinality.oneToMany
? [ls.value[1], rs.value[1]]
: [rs.value[1], ls.value[1]];
let { value, keep } = vectorElemBinop(
op,
parsePrometheusFloat(vl),
parsePrometheusFloat(vr)
);
// For OR, also add all RHS samples to the result if the LHS for the group is empty.
if (op === binaryOperatorType.or) {
mg.rhs.forEach((rs, rIdx) => {
if (mg.lhs.length === 0) {
mg.result.push({
sample: {
metric: rs.metric,
value: rs.value,
},
manySideIdx: rIdx,
});
}
});
}
} else {
// Calculate the results for this match group.
mg.rhs.forEach((rs) => {
mg.lhs.forEach((ls, lIdx) => {
if (!ls.value || !rs.value) {
// TODO: Implement native histogram support.
throw new Error("native histogram support not implemented yet");
}
const metric = resultMetric(ls.metric, rs.metric, op, matching);
if (bool) {
value = keep ? 1.0 : 0.0;
delete metric.__name__;
}
const [vl, vr] =
matching.card !== vectorMatchCardinality.oneToMany
? [ls.value[1], rs.value[1]]
: [rs.value[1], ls.value[1]];
mg.result.push({
sample: {
metric: metric,
value: [
ls.value[0],
keep || bool ? formatPrometheusFloat(value) : filteredSampleValue,
],
},
manySideIdx: lIdx,
let { value, keep } = vectorElemBinop(
op,
parsePrometheusFloat(vl),
parsePrometheusFloat(vr)
);
const metric = resultMetric(ls.metric, rs.metric, op, matching);
if (bool) {
value = keep ? 1.0 : 0.0;
delete metric.__name__;
}
mg.result.push({
sample: {
metric: metric,
value: [
ls.value[0],
keep || bool
? formatPrometheusFloat(value)
: filteredSampleValue,
],
},
manySideIdx: lIdx,
});
});
});
});
}
});
// If we originally swapped the LHS and RHS, swap them back to the original order.
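
The set-operator branch added above keeps each match group's LHS samples according to the operator, and for "or" additionally falls back to the group's RHS when its LHS is empty. A minimal sketch of that per-group rule, with illustrative names that are not the module's actual exports:

// Per-group keep rules for PromQL set operators, mirroring the logic above.
type SetOp = "and" | "or" | "unless";

// An LHS sample survives if the group satisfies the operator's condition.
const keepLhs = (op: SetOp, rhsCount: number): boolean =>
  (op === "and" && rhsCount > 0) ||
  (op === "unless" && rhsCount === 0) ||
  op === "or";

// Only "or" ever emits RHS samples, and only when the group's LHS is empty.
const keepRhs = (op: SetOp, lhsCount: number): boolean =>
  op === "or" && lhsCount === 0;

For example, metric_a and on(label1) metric_b keeps an LHS sample exactly when at least one RHS series shares its label1 value, which is what the new test cases above exercise.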

View file

@ -0,0 +1,15 @@
import { em, rem } from "@mantine/core";
export const navIconStyle = { width: rem(16), height: rem(16) };
export const menuIconStyle = { width: rem(14), height: rem(14) };
export const badgeIconStyle = { width: em(17), height: em(17) };
export const actionIconStyle = { width: "70%", height: "70%" };
export const inputIconStyle = { width: em(16), height: em(16) };
export const buttonIconStyle = { width: em(20), height: em(20) };
export const infoPageCardTitleIconStyle = { width: em(17.5), height: em(17.5) };
export const expandIconStyle = { width: em(16), height: em(16) };
export const themeSwitcherIconStyle = {
width: rem(20),
height: rem(20),
display: "block",
};
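
This new styles module centralizes icon sizing that was previously hard-coded as per-icon size props throughout the pages above, using em so icons scale with the surrounding font size (and rem for fixed, root-relative cases). A hypothetical usage sketch:

import { TextInput } from "@mantine/core";
import { IconSearch } from "@tabler/icons-react";
import { inputIconStyle } from "./styles";

// The icon takes its dimensions from the shared style object rather than
// a hard-coded pixel size prop.
const filterInput = (
  <TextInput leftSection={<IconSearch style={inputIconStyle} />} />
);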

View file

@ -30,7 +30,7 @@
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.300.0-beta.0",
"lru-cache": "^7.18.3"
"lru-cache": "^11.0.1"
},
"devDependencies": {
"@codemirror/autocomplete": "^6.17.0",

View file

@ -14,7 +14,7 @@
import { FetchFn } from './index';
import { Matcher } from '../types';
import { labelMatchersToString } from '../parser';
import LRUCache from 'lru-cache';
import { LRUCache } from 'lru-cache';
export interface MetricMetadata {
type: string;
@ -292,7 +292,10 @@ class Cache {
private flags: Record<string, string>;
constructor(config?: CacheConfig) {
const maxAge: LRUCache.LimitedByTTL = { ttl: config && config.maxAge ? config.maxAge : 5 * 60 * 1000 };
const maxAge = {
ttl: config && config.maxAge ? config.maxAge : 5 * 60 * 1000,
ttlAutopurge: false,
};
this.completeAssociation = new LRUCache<string, Map<string, Set<string>>>(maxAge);
this.metricMetadata = {};
this.labelValues = new LRUCache<string, string[]>(maxAge);

View file

@ -18,7 +18,7 @@
"eslint-config-prettier": "^9.1.0",
"prettier": "^3.3.3",
"ts-jest": "^29.2.2",
"typescript": "^5.2.2",
"typescript": "^5.6.2",
"vite": "^5.1.0"
}
},
@ -163,7 +163,7 @@
"license": "Apache-2.0",
"dependencies": {
"@prometheus-io/lezer-promql": "0.300.0-beta.0",
"lru-cache": "^7.18.3"
"lru-cache": "^11.0.1"
},
"devDependencies": {
"@codemirror/autocomplete": "^6.17.0",
@ -7050,12 +7050,11 @@
}
},
"node_modules/lru-cache": {
"version": "7.18.3",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
"integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
"license": "ISC",
"version": "11.0.1",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.0.1.tgz",
"integrity": "sha512-CgeuL5uom6j/ZVrg7G/+1IXqRY8JXX4Hghfy5YE0EhoYQWvndP1kufu58cmZLNIDKnRhZrXfdS9urVWx98AipQ==",
"engines": {
"node": ">=12"
"node": "20 || >=22"
}
},
"node_modules/lz-string": {
@ -9011,11 +9010,10 @@
}
},
"node_modules/typescript": {
"version": "5.5.4",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.5.4.tgz",
"integrity": "sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==",
"version": "5.6.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.2.tgz",
"integrity": "sha512-NW8ByodCSNCwZeghjN3o+JX5OFH0Ojg6sadjEKY4huZ52TqbJTJnDo5+Tw98lSy63NZvi4n+ez5m2u5d4PkZyw==",
"dev": true,
"license": "Apache-2.0",
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"

View file

@ -21,7 +21,7 @@
"eslint-config-prettier": "^9.1.0",
"prettier": "^3.3.3",
"ts-jest": "^29.2.2",
"typescript": "^5.2.2",
"typescript": "^5.6.2",
"vite": "^5.1.0"
}
}

View file

@ -102,6 +102,14 @@ var newUIReactRouterServerPaths = []string{
"/tsdb-status",
}
type ReadyStatus uint32
const (
NotReady ReadyStatus = iota
Ready
Stopping
)
// withStackTrace logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
@ -331,7 +339,7 @@ func New(logger log.Logger, o *Options) *Handler {
now: model.Now,
}
h.SetReady(false)
h.SetReady(NotReady)
factorySPr := func(_ context.Context) api_v1.ScrapePoolsRetriever { return h.scrapeManager }
factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager }
@ -572,30 +580,39 @@ func serveDebug(w http.ResponseWriter, req *http.Request) {
}
// SetReady sets the ready status of our web Handler.
func (h *Handler) SetReady(v bool) {
if v {
h.ready.Store(1)
func (h *Handler) SetReady(v ReadyStatus) {
if v == Ready {
h.ready.Store(uint32(Ready))
h.metrics.readyStatus.Set(1)
return
}
h.ready.Store(0)
h.ready.Store(uint32(v))
h.metrics.readyStatus.Set(0)
}
// Verifies whether the server is ready or not.
func (h *Handler) isReady() bool {
return h.ready.Load() > 0
return ReadyStatus(h.ready.Load()) == Ready
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if h.isReady() {
switch ReadyStatus(h.ready.Load()) {
case Ready:
f(w, r)
} else {
case NotReady:
// Set the header before writing the status line; headers added after
// WriteHeader are silently dropped.
w.Header().Set("X-Prometheus-Stopping", "false")
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
case Stopping:
w.Header().Set("X-Prometheus-Stopping", "true")
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
default:
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Unknown state")
}
}
}
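
The readiness flag becomes a three-state value so that /-/ready can distinguish a server that is still starting from one that is shutting down, signalled via the X-Prometheus-Stopping header. A sketch of the resulting contract (in TypeScript for consistency with this commit's other examples; the names are illustrative, not part of the Go API):

type ReadyStatus = "NotReady" | "Ready" | "Stopping";

// Expected /-/ready behaviour per state, per the handler logic above.
function readyResponse(s: ReadyStatus): {
  status: number;
  stopping?: "true" | "false";
} {
  switch (s) {
    case "Ready":
      return { status: 200 };
    case "NotReady":
      return { status: 503, stopping: "false" }; // X-Prometheus-Stopping: false
    case "Stopping":
      return { status: 503, stopping: "true" }; // X-Prometheus-Stopping: true
  }
}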

View file

@ -156,7 +156,7 @@ func TestReadyAndHealthy(t *testing.T) {
cleanupTestResponse(t, resp)
// Set to ready.
webHandler.SetReady(true)
webHandler.SetReady(Ready)
for _, u := range []string{
baseURL + "/-/healthy",
@ -260,7 +260,7 @@ func TestRoutePrefix(t *testing.T) {
cleanupTestResponse(t, resp)
// Set to ready.
webHandler.SetReady(true)
webHandler.SetReady(Ready)
resp, err = http.Get(baseURL + opts.RoutePrefix + "/-/healthy")
require.NoError(t, err)
@ -307,7 +307,7 @@ func TestDebugHandler(t *testing.T) {
},
}
handler := New(nil, opts)
handler.SetReady(true)
handler.SetReady(Ready)
w := httptest.NewRecorder()
@ -349,7 +349,7 @@ func TestHTTPMetrics(t *testing.T) {
counter := handler.metrics.requestCounter
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable)))))
handler.SetReady(true)
handler.SetReady(Ready)
for range [2]int{} {
code = getReady()
require.Equal(t, http.StatusOK, code)
@ -358,7 +358,7 @@ func TestHTTPMetrics(t *testing.T) {
require.Equal(t, 2, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusOK)))))
require.Equal(t, 1, int(prom_testutil.ToFloat64(counter.WithLabelValues("/-/ready", strconv.Itoa(http.StatusServiceUnavailable)))))
handler.SetReady(false)
handler.SetReady(NotReady)
for range [2]int{} {
code = getReady()
require.Equal(t, http.StatusServiceUnavailable, code)
@ -537,7 +537,7 @@ func TestAgentAPIEndPoints(t *testing.T) {
opts.Flags = map[string]string{}
webHandler := New(nil, opts)
webHandler.SetReady(true)
webHandler.SetReady(Ready)
webHandler.config = &config.Config{}
webHandler.notifier = &notifier.Manager{}
l, err := webHandler.Listeners()
@ -692,7 +692,7 @@ func TestMultipleListenAddresses(t *testing.T) {
time.Sleep(5 * time.Second)
// Set to ready.
webHandler.SetReady(true)
webHandler.SetReady(Ready)
for _, port := range []string{port1, port2} {
baseURL := "http://localhost" + port