Merge branch 'main' into cedwards/nhcb-wal-wbl

# Conflicts:
#	tsdb/tsdbutil/histogram.go
Author: György Krajcsovits
Date: 2025-01-02 12:50:19 +01:00
Commit: 1e420ef373

93 changed files with 4827 additions and 2766 deletions


@@ -16,8 +16,23 @@ updates:
directory: "/documentation/examples/remote_storage"
schedule:
interval: "monthly"
# New mantine-ui packages.
- package-ecosystem: "npm"
directory: "/web/ui"
labels:
- dependencies
- javascript
- mantine-ui
schedule:
interval: "monthly"
open-pull-requests-limit: 20
# Old react-app packages.
- package-ecosystem: "npm"
directory: "/web/ui/react-app"
labels:
- dependencies
- javascript
- old-react-ui
schedule:
interval: "monthly"
open-pull-requests-limit: 20


@@ -33,7 +33,7 @@ jobs:
- uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
- run: GOARCH=386 go test ./cmd/prometheus
- run: GOARCH=386 go test ./...
- uses: ./.github/promci/actions/check_proto
with:
version: "3.15.8"
@@ -80,7 +80,7 @@ jobs:
runs-on: windows-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
- uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
with:
go-version: 1.23.x
- run: |
@@ -171,7 +171,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
with:
cache: false
go-version: 1.23.x
@@ -184,7 +184,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
with:
go-version: 1.23.x
- name: Install snmp_exporter/generator dependencies
@@ -247,7 +247,7 @@ jobs:
with:
node-version-file: "web/ui/.nvmrc"
registry-url: "https://registry.npmjs.org"
- uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
- uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ~/.npm
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}


@@ -27,12 +27,12 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Initialize CodeQL
uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
uses: github/codeql-action/init@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
uses: github/codeql-action/autobuild@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
uses: github/codeql-action/analyze@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7


@@ -1,61 +0,0 @@
on:
repository_dispatch:
types: [funcbench_start]
name: Funcbench Workflow
permissions:
contents: read
jobs:
run_funcbench:
name: Running funcbench
if: github.event.action == 'funcbench_start'
runs-on: ubuntu-latest
env:
AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
BRANCH: ${{ github.event.client_payload.BRANCH }}
BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }}
PACKAGE_PATH: ${{ github.event.client_payload.PACKAGE_PATH }}
GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
GITHUB_ORG: prometheus
GITHUB_REPO: prometheus
GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
GKE_PROJECT_ID: macro-mile-203600
PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
PROVIDER: gke
ZONE: europe-west3-a
steps:
- name: Update status to pending
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Prepare nodepool
uses: docker://prominfra/funcbench:master
with:
entrypoint: "docker_entrypoint"
args: make deploy
- name: Delete all resources
if: always()
uses: docker://prominfra/funcbench:master
with:
entrypoint: "docker_entrypoint"
args: make clean
- name: Update status to failure
if: failure()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
- name: Update status to success
if: success()
run: >-
curl -i -X POST
-H "Authorization: Bearer $GITHUB_TOKEN"
-H "Content-Type: application/json"
--data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
"https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"


@@ -15,6 +15,8 @@ env:
PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
PROVIDER: gke
RELEASE: ${{ github.event.client_payload.RELEASE }}
BENCHMARK_VERSION: ${{ github.event.client_payload.BENCHMARK_VERSION }}
BENCHMARK_DIRECTORY: ${{ github.event.client_payload.BENCHMARK_DIRECTORY }}
ZONE: europe-west3-a
jobs:
benchmark_start:


@@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # tag=v3.27.5
uses: github/codeql-action/upload-sarif@babb554ede22fd5605947329c4d04d8e7a0b8155 # tag=v3.27.7
with:
sarif_file: results.sarif


@@ -4,6 +4,15 @@
* [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428
* [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416
* [BUGFIX] OTLP receiver: Allow colons in non-standard units. #15710
## 3.0.1 / 2024-11-28
The first bug fix release for Prometheus 3.
* [BUGFIX] Promql: Make subqueries left open. #15431
* [BUGFIX] Fix memory leak when query log is enabled. #15434
* [BUGFIX] Support utf8 names on /v1/label/:name/values endpoint. #15399
## 3.0.0 / 2024-11-14


@@ -67,7 +67,7 @@ Prometheus will now be reachable at <http://localhost:9090/>.
To build Prometheus from source code, you need:
* Go [version 1.17 or greater](https://golang.org/doc/install).
* Go [version 1.22 or greater](https://golang.org/doc/install).
* NodeJS [version 16 or greater](https://nodejs.org/).
* npm [version 7 or greater](https://www.npmjs.com/).


@@ -1 +1 @@
3.0.0
3.0.1


@@ -259,6 +259,7 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
case "created-timestamp-zero-ingestion":
c.scrape.EnableCreatedTimestampZeroIngestion = true
c.web.CTZeroIngestionEnabled = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -593,12 +594,14 @@ func main() {
logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
os.Exit(2)
}
// Get scrape configs to validate dynamically loaded scrape_config_files.
// They can change over time, but we do the extra validation on startup for a better experience.
if _, err := cfgFile.GetScrapeConfigs(); err != nil {
absPath, pathErr := filepath.Abs(cfg.configFile)
if pathErr != nil {
absPath = cfg.configFile
}
logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
logger.Error(fmt.Sprintf("Error loading dynamic scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
os.Exit(2)
}
if cfg.tsdb.EnableExemplarStorage {
@@ -986,18 +989,12 @@ func main() {
listeners, err := webHandler.Listeners()
if err != nil {
logger.Error("Unable to start web listener", "err", err)
if err := queryEngine.Close(); err != nil {
logger.Warn("Closing query engine failed", "err", err)
}
os.Exit(1)
}
err = toolkit_web.Validate(*webConfig)
if err != nil {
logger.Error("Unable to validate web configuration file", "err", err)
if err := queryEngine.Close(); err != nil {
logger.Warn("Closing query engine failed", "err", err)
}
os.Exit(1)
}
@@ -1019,9 +1016,6 @@ func main() {
case <-cancel:
reloadReady.Close()
}
if err := queryEngine.Close(); err != nil {
logger.Warn("Closing query engine failed", "err", err)
}
return nil
},
func(err error) {


@@ -144,7 +144,9 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
}
}
res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
scrape.PopulateDiscoveredLabels(lb, scrapeConfig, target, targetGroup.Labels)
orig := lb.Labels()
res, err := scrape.PopulateLabels(lb, scrapeConfig, target, targetGroup.Labels)
result := sdCheckResult{
DiscoveredLabels: orig,
Labels: res,


@@ -117,11 +117,12 @@ func Load(s string, logger *slog.Logger) (*Config, error) {
default:
return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
}
cfg.loaded = true
return cfg, nil
}
// LoadFile parses the given YAML file into a Config.
// LoadFile parses and validates the given YAML file into a read-only Config.
// Callers should never write to or shallow copy the returned Config.
func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) {
content, err := os.ReadFile(filename)
if err != nil {
@@ -270,9 +271,12 @@ type Config struct {
RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"`
OTLPConfig OTLPConfig `yaml:"otlp,omitempty"`
loaded bool // Certain methods require configuration to use Load validation.
}
// SetDirectory joins any relative file paths with dir.
// This method writes to config, and it's not concurrency safe.
func (c *Config) SetDirectory(dir string) {
c.GlobalConfig.SetDirectory(dir)
c.AlertingConfig.SetDirectory(dir)
@@ -302,24 +306,26 @@ func (c Config) String() string {
return string(b)
}
// GetScrapeConfigs returns the scrape configurations.
// GetScrapeConfigs returns the read-only, validated scrape configurations including
// the ones from the scrape_config_files.
// This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency).
// This method also assumes the Config was created by the Load or LoadFile function; it returns an
// error if it was not. We can't re-validate or apply globals here due to races;
// read more at https://github.com/prometheus/prometheus/issues/15538.
func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
if !c.loaded {
// Programmatic error, we warn before more confusing errors would happen due to lack of the globalization.
return nil, errors.New("scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
}
scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
// We do these checks for library users that would not call validate in
// Unmarshal.
if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, err
}
if _, ok := jobNames[scfg.JobName]; ok {
return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
}
jobNames[scfg.JobName] = "main config file"
scfgs[i] = scfg
}
// Re-read and validate the dynamic scrape config rules.
for _, pat := range c.ScrapeConfigFiles {
fs, err := filepath.Glob(pat)
if err != nil {
@@ -355,6 +361,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
}
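With the loaded flag in place, library users are expected to construct a Config only through Load or LoadFile and then call GetScrapeConfigs on the result. A minimal sketch of that flow, assuming only the signatures shown in this diff (the file name and logger setup are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/common/promslog"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// LoadFile validates the YAML and marks the Config as loaded; a Config
	// built any other way (e.g. direct yaml.Unmarshal) makes
	// GetScrapeConfigs return an error.
	cfg, err := config.LoadFile("prometheus.yml", false, promslog.NewNopLogger())
	if err != nil {
		log.Fatal(err)
	}
	// Read-only, concurrency-safe view that also resolves scrape_config_files.
	scfgs, err := cfg.GetScrapeConfigs()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("scrape configs:", len(scfgs))
}
```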
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
*c = DefaultConfig
// We want to set c to the defaults and then overwrite it with the input.
@@ -391,18 +398,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
}
// Do global overrides and validate unique names.
// Do global overrides and validation.
jobNames := map[string]struct{}{}
for _, scfg := range c.ScrapeConfigs {
if err := scfg.Validate(c.GlobalConfig); err != nil {
return err
}
if _, ok := jobNames[scfg.JobName]; ok {
return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
}
jobNames[scfg.JobName] = struct{}{}
}
rwNames := map[string]struct{}{}
for _, rwcfg := range c.RemoteWriteConfigs {
if rwcfg == nil {
@@ -1420,10 +1427,13 @@ func getGoGCEnv() int {
type translationStrategyOption string
var (
// NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added.
// NoUTF8EscapingWithSuffixes will accept metric/label names as they are.
// Unit and type suffixes may be added to metric names, according to certain rules.
NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
// This option will translate all UTF-8 characters to underscores, while adding units and type suffixes.
// This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores,
// and label name characters that are not alphanumerics/underscores to underscores.
// Unit and type suffixes may be appended to metric names, according to certain rules.
UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
)
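As a rough illustration of the escaping these comments describe (a sketch of the rule only, not the actual OTLP translator code; the regexps and helper names below are assumptions):

```go
package main

import (
	"fmt"
	"regexp"
)

// Per the doc comments above: metric names may keep colons, label names may not.
var (
	invalidMetricChars = regexp.MustCompile(`[^a-zA-Z0-9_:]`)
	invalidLabelChars  = regexp.MustCompile(`[^a-zA-Z0-9_]`)
)

func escapeMetricName(name string) string {
	return invalidMetricChars.ReplaceAllString(name, "_")
}

func escapeLabelName(name string) string {
	return invalidLabelChars.ReplaceAllString(name, "_")
}

func main() {
	fmt.Println(escapeMetricName("http.server.duration")) // http_server_duration
	fmt.Println(escapeLabelName("k8s.pod.name"))          // k8s_pod_name
}
```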


@@ -18,6 +18,8 @@ package config
const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"
var ruleFilesExpectedConf = &Config{
loaded: true,
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
RuleFiles: []string{


@@ -87,6 +87,7 @@ const (
)
var expectedConf = &Config{
loaded: true,
GlobalConfig: GlobalConfig{
ScrapeInterval: model.Duration(15 * time.Second),
ScrapeTimeout: DefaultGlobalConfig.ScrapeTimeout,
@@ -1512,10 +1513,10 @@ func TestYAMLRoundtrip(t *testing.T) {
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
got := &Config{}
require.NoError(t, yaml.UnmarshalStrict(out, got))
got, err := Load(string(out), promslog.NewNopLogger())
require.NoError(t, err)
require.Equal(t, want, got)
}
@@ -1525,10 +1526,10 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
require.NoError(t, err)
out, err := yaml.Marshal(want)
require.NoError(t, err)
got := &Config{}
require.NoError(t, yaml.UnmarshalStrict(out, got))
got, err := Load(string(out), promslog.NewNopLogger())
require.NoError(t, err)
require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
@@ -2219,6 +2220,7 @@ func TestEmptyConfig(t *testing.T) {
c, err := Load("", promslog.NewNopLogger())
require.NoError(t, err)
exp := DefaultConfig
exp.loaded = true
require.Equal(t, exp, *c)
}
@@ -2268,6 +2270,7 @@ func TestEmptyGlobalBlock(t *testing.T) {
require.NoError(t, err)
exp := DefaultConfig
exp.Runtime = DefaultRuntimeConfig
exp.loaded = true
require.Equal(t, exp, *c)
}
@@ -2548,3 +2551,18 @@ func TestScrapeProtocolHeader(t *testing.T) {
})
}
}
// Regression test against https://github.com/prometheus/prometheus/issues/15538
func TestGetScrapeConfigs_Loaded(t *testing.T) {
t.Run("without load", func(t *testing.T) {
c := &Config{}
_, err := c.GetScrapeConfigs()
require.EqualError(t, err, "scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
})
t.Run("with load", func(t *testing.T) {
c, err := Load("", promslog.NewNopLogger())
require.NoError(t, err)
_, err = c.GetScrapeConfigs()
require.NoError(t, err)
})
}


@@ -16,6 +16,8 @@ package config
const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml"
var ruleFilesExpectedConf = &Config{
loaded: true,
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
RuleFiles: []string{


@@ -205,9 +205,6 @@ func getEndpointInfoForSystems(
err := rpcclient.Call(
"system.monitoring.listEndpoints",
[]interface{}{token, systemIDs}, &endpointInfos)
if err != nil {
return nil, err
}
return endpointInfos, err
}
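The deleted branch was redundant: `return endpointInfos, err` already propagates a non-nil error. A generic sketch of the idiom (all names hypothetical, not from this repository):

```go
package main

import (
	"errors"
	"fmt"
)

// fetch stands in for an RPC wrapper like getEndpointInfoForSystems.
func fetch(fail bool) ([]string, error) {
	var out []string
	var err error
	if fail {
		err = errors.New("rpc failed")
	}
	// Equivalent in practice to: if err != nil { return nil, err },
	// as long as callers only read the result when err is nil.
	return out, err
}

func main() {
	if _, err := fetch(true); err != nil {
		fmt.Println(err)
	}
}
```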


@@ -179,8 +179,8 @@ otlp:
# - "UnderscoreEscapingWithSuffixes" refers to commonly agreed normalization used
# by OpenTelemetry in https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus
# - "NoUTF8EscapingWithSuffixes" is a mode that relies on UTF-8 support in Prometheus.
# It preserves all special characters like dots, but it still add required suffixes
# for units and _total like in UnderscoreEscapingWithSuffixes.
# It preserves all special characters like dots, but still adds required metric name suffixes
# for units and _total, as UnderscoreEscapingWithSuffixes does.
[ translation_strategy: <string> | default = "UnderscoreEscapingWithSuffixes" ]
# Enables adding "service.name", "service.namespace" and "service.instance.id"
# resource attributes to the "target_info" metric, on top of converting
@@ -676,6 +676,13 @@ http_headers:
Azure SD configurations allow retrieving scrape targets from Azure VMs.
The discovery requires at least the following permissions:
* `Microsoft.Compute/virtualMachines/read`: Required for VM discovery
* `Microsoft.Network/networkInterfaces/read`: Required for VM discovery
* `Microsoft.Compute/virtualMachineScaleSets/virtualMachines/read`: Required for scale set (VMSS) discovery
* `Microsoft.Compute/virtualMachineScaleSets/virtualMachines/networkInterfaces/read`: Required for scale set (VMSS) discovery
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_azure_machine_id`: the machine ID


@@ -15,7 +15,7 @@ They may be enabled by default in future versions.
`--enable-feature=exemplar-storage`
[OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces.
[OpenMetrics](https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces.
Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The config file block [storage](configuration/configuration.md#configuration-file)/[exemplars](configuration/configuration.md#exemplars) can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `trace_id=<jaeger-trace-id>` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration).
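To make the circular-buffer behaviour concrete, here is a minimal sketch of a fixed-size ring that overwrites the oldest exemplar once full (illustrative only, not Prometheus's actual exemplar storage):

```go
package main

import "fmt"

type exemplar struct {
	labels string // e.g. trace_id=<jaeger-trace-id>
	value  float64
	ts     int64
}

type circularBuffer struct {
	buf  []exemplar
	next int
	full bool
}

func newCircularBuffer(size int) *circularBuffer {
	return &circularBuffer{buf: make([]exemplar, size)}
}

// add overwrites the oldest entry once the buffer is full.
func (c *circularBuffer) add(e exemplar) {
	c.buf[c.next] = e
	c.next = (c.next + 1) % len(c.buf)
	if c.next == 0 {
		c.full = true
	}
}

func main() {
	cb := newCircularBuffer(2)
	cb.add(exemplar{labels: "trace_id=abc", value: 1})
	cb.add(exemplar{labels: "trace_id=def", value: 2})
	cb.add(exemplar{labels: "trace_id=ghi", value: 3}) // overwrites the oldest
	fmt.Printf("%+v\n", cb.buf)
}
```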


@@ -22,7 +22,7 @@ otlp:
- k8s.pod.name
- k8s.replicaset.name
- k8s.statefulset.name
# Ingest OTLP data keeping UTF-8 characters in metric/label names.
# Ingest OTLP data keeping all characters in metric/label names.
translation_strategy: NoUTF8EscapingWithSuffixes
storage:


@@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/documentation/examples/remote_storage
go 1.22.0
go 1.22.7
require (
github.com/alecthomas/kingpin/v2 v2.4.0
@@ -8,8 +8,8 @@ require (
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.8
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.60.1
github.com/prometheus/prometheus v0.53.1
github.com/prometheus/common v0.61.0
github.com/prometheus/prometheus v1.99.0
github.com/stretchr/testify v1.10.0
)
@@ -55,15 +55,15 @@ require (
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.27.0 // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sys v0.25.0 // indirect
golang.org/x/text v0.18.0 // indirect
golang.org/x/crypto v0.30.0 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/oauth2 v0.24.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
google.golang.org/protobuf v1.35.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.29.3 // indirect


@@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -344,20 +344,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -373,17 +373,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -411,8 +411,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

go.mod

@@ -1,8 +1,8 @@
module github.com/prometheus/prometheus
go 1.22.0
go 1.22.7
toolchain go1.23.0
toolchain go1.23.4
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
@@ -12,32 +12,32 @@ require (
github.com/Code-Hex/go-generics-cache v1.5.1
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
github.com/aws/aws-sdk-go v1.55.5
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.131.0
github.com/docker/docker v27.3.1+incompatible
github.com/digitalocean/godo v1.132.0
github.com/docker/docker v27.4.1+incompatible
github.com/edsrzf/mmap-go v1.2.0
github.com/envoyproxy/go-control-plane v0.13.1
github.com/envoyproxy/protoc-gen-validate v1.1.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.7.0
github.com/fsnotify/fsnotify v1.8.0
github.com/go-openapi/strfmt v0.23.0
github.com/go-zookeeper/zk v1.0.4
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.6.0
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud v1.14.1
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.30.0
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3
github.com/hetznercloud/hcloud-go/v2 v2.17.0
github.com/ionos-cloud/sdk-go/v6 v6.2.1
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec
github.com/hetznercloud/hcloud-go/v2 v2.17.1
github.com/ionos-cloud/sdk-go/v6 v6.3.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.11
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
@@ -52,37 +52,37 @@ require (
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.60.1
github.com/prometheus/common v0.61.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/exporter-toolkit v0.13.1
github.com/prometheus/exporter-toolkit v0.13.2
github.com/prometheus/sigv4 v0.1.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.10.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/pdata v1.20.0
go.opentelemetry.io/collector/semconv v0.114.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0
go.opentelemetry.io/otel v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0
go.opentelemetry.io/otel/sdk v1.32.0
go.opentelemetry.io/otel/trace v1.32.0
go.opentelemetry.io/collector/pdata v1.22.0
go.opentelemetry.io/collector/semconv v0.116.0
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
go.opentelemetry.io/otel v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0
go.opentelemetry.io/otel/sdk v1.33.0
go.opentelemetry.io/otel/trace v1.33.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
golang.org/x/oauth2 v0.24.0
golang.org/x/sync v0.9.0
golang.org/x/sys v0.27.0
golang.org/x/text v0.20.0
golang.org/x/tools v0.27.0
google.golang.org/api v0.204.0
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28
google.golang.org/grpc v1.67.1
google.golang.org/protobuf v1.35.2
golang.org/x/sync v0.10.0
golang.org/x/sys v0.28.0
golang.org/x/text v0.21.0
golang.org/x/tools v0.28.0
google.golang.org/api v0.213.0
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484
google.golang.org/grpc v1.69.0
google.golang.org/protobuf v1.36.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.31.3
@@ -93,9 +93,9 @@ require (
)
require (
cloud.google.com/go/auth v0.10.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
cloud.google.com/go/auth v0.13.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -104,7 +104,7 @@ require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cilium/ebpf v0.11.0 // indirect
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -121,26 +121,25 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.22.2 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/loads v0.21.5 // indirect
github.com/go-openapi/spec v0.20.14 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-openapi/validate v0.23.0 // indirect
github.com/go-resty/resty/v2 v2.15.3 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/glog v1.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/googleapis/gax-go/v2 v2.14.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -151,7 +150,7 @@ require (
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
@@ -184,16 +183,16 @@ require (
github.com/x448/float16 v0.8.4 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.32.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.29.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.31.0 // indirect
golang.org/x/term v0.26.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
golang.org/x/net v0.32.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/time v0.8.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
@@ -213,3 +212,6 @@ exclude (
github.com/grpc-ecosystem/grpc-gateway v1.14.7
google.golang.org/api v0.30.0
)
// Pin until https://github.com/fsnotify/fsnotify/issues/656 is resolved.
replace github.com/fsnotify/fsnotify v1.8.0 => github.com/fsnotify/fsnotify v1.7.0

go.sum

@@ -1,11 +1,11 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo=
cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk=
cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
@@ -42,8 +42,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -74,8 +74,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -91,14 +91,14 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/digitalocean/godo v1.131.0 h1:0WHymufAV5avpodT0h5/pucUVfO4v7biquOIqhLeROY=
github.com/digitalocean/godo v1.131.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/digitalocean/godo v1.132.0 h1:n0x6+ZkwbyQBtIU1wwBhv26EINqHg0wWQiBXlwYg/HQ=
github.com/digitalocean/godo v1.132.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -146,8 +146,8 @@ github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE
github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo=
github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0=
@@ -156,8 +156,8 @@ github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/
github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8=
@@ -177,21 +177,11 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
@@ -205,9 +195,7 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -216,8 +204,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -225,8 +213,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -235,8 +223,8 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
@@ -283,16 +271,16 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.17.0 h1:ge0w2piey9SV6XGyU/wQ6HBR24QyMbJ3wLzezplqR68=
github.com/hetznercloud/hcloud-go/v2 v2.17.0/go.mod h1:zfyZ4Orx+mPpYDzWAxXR7DHGL50nnlZ5Edzgs1o6f/s=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/hetznercloud/hcloud-go/v2 v2.17.1 h1:DPi019dv0WCiECEmtcuTgc//hBvnxESb6QlJnAb4a04=
github.com/hetznercloud/hcloud-go/v2 v2.17.1/go.mod h1:6ygmBba+FdawR2lLp/d9uJljY2k0dTYthprrI8usdLw=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/ionos-cloud/sdk-go/v6 v6.3.0 h1:/lTieTH9Mo/CWm3cTlFLnK10jgxjUGkAqRffGqvPteY=
github.com/ionos-cloud/sdk-go/v6 v6.3.0/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@@ -438,12 +426,12 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/exporter-toolkit v0.13.1 h1:Evsh0gWQo2bdOHlnz9+0Nm7/OFfIwhE2Ws4A2jIlR04=
github.com/prometheus/exporter-toolkit v0.13.1/go.mod h1:ujdv2YIOxtdFxxqtloLpbqmxd5J0Le6IITUvIRSWjj0=
github.com/prometheus/exporter-toolkit v0.13.2 h1:Z02fYtbqTMy2i/f+xZ+UK5jy/bl1Ex3ndzh06T/Q9DQ=
github.com/prometheus/exporter-toolkit v0.13.2/go.mod h1:tCqnfx21q6qN1KA4U3Bfb8uWzXfijIrJz3/kTIqMV7g=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -485,7 +473,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
@@ -501,32 +488,34 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.20.0 h1:ePcwt4bdtISP0loHaE+C9xYoU2ZkIvWv89Fob16o9SM=
go.opentelemetry.io/collector/pdata v1.20.0/go.mod h1:Ox1YVLe87cZDB/TL30i4SUz1cA5s6AM6SpFMfY61ICs=
go.opentelemetry.io/collector/semconv v0.114.0 h1:/eKcCJwZepQUtEuFuxa0thx2XIOvhFpaf214ZG1a11k=
go.opentelemetry.io/collector/semconv v0.114.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0 h1:7F3XCD6WYzDkwbi8I8N+oYJWquPVScnRosKGgqjsR8c=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.57.0/go.mod h1:Dk3C0BfIlZDZ5c6eVS7TYiH2vssuyUU3vUsgbrR+5V4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI=
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4=
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU=
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI=
go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY=
go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y=
go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
@ -541,8 +530,8 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@ -566,12 +555,11 @@ golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
@ -583,8 +571,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -615,19 +603,19 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -638,43 +626,32 @@ golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4=
google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag=
google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ=
google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g=
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View file

@ -18,7 +18,7 @@ import "github.com/prometheus/prometheus/model/labels"
// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of
// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128
// UTF-8 characters."
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars
const ExemplarMaxLabelSetLength = 128
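For context, a minimal sketch (not code from this change) of how a caller could enforce the limit above before attaching an exemplar. exemplarLabelSetLength is a hypothetical helper; it counts runes to match the spec's "UTF-8 characters" wording and assumes imports of unicode/utf8 and this labels package.

// Hypothetical helper: the combined label-set length in UTF-8
// characters, as the OpenMetrics rule above defines it.
func exemplarLabelSetLength(ls labels.Labels) int {
	n := 0
	ls.Range(func(l labels.Label) {
		n += utf8.RuneCountInString(l.Name) + utf8.RuneCountInString(l.Value)
	})
	return n
}

// Accept an exemplar only if exemplarLabelSetLength(e.Labels) <= ExemplarMaxLabelSetLength.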
// Exemplar is additional information associated with a time series.

View file

@ -19,12 +19,12 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
bucketsPerSide := numBuckets / 2
spanLength := uint32(bucketsPerSide / numSpans)
// Given all bucket deltas are 1, each side's counts are 1..bucketsPerSide, so both sides together sum to bucketsPerSide * (bucketsPerSide + 1).
observationCount := bucketsPerSide * (1 + bucketsPerSide)
observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide))
var histograms []*Histogram
for i := 0; i < numHistograms; i++ {
h := &Histogram{
Count: uint64(i + observationCount),
Count: uint64(i) + observationCount,
ZeroCount: uint64(i),
ZeroThreshold: 1e-128,
Sum: 18.4 * float64(i+1),

View file

@ -19,6 +19,7 @@ import (
"bytes"
"slices"
"strings"
"unsafe"
"github.com/cespare/xxhash/v2"
)
@ -488,3 +489,8 @@ func (b *ScratchBuilder) Labels() Labels {
func (b *ScratchBuilder) Overwrite(ls *Labels) {
*ls = append((*ls)[:0], b.add...)
}
// SizeOfLabels returns the approximate space required for n copies of a label.
func SizeOfLabels(name, value string, n uint64) uint64 {
return (uint64(len(name)) + uint64(unsafe.Sizeof(name)) + uint64(len(value)) + uint64(unsafe.Sizeof(value))) * n
}
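A quick usage sketch of the new helper under this build; the figure is intentionally rough (payload plus two string headers, times n):

// Estimate the cost of one label repeated across 1000 series.
est := labels.SizeOfLabels("namespace", "monitoring", 1000)
fmt.Printf("~%d bytes for 1000 copies\n", est)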

View file

@ -51,7 +51,11 @@ func (ls Labels) String() string {
b.WriteByte(',')
b.WriteByte(' ')
}
b.WriteString(l.Name)
if !model.LabelName(l.Name).IsValidLegacy() {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name))
} else {
b.WriteString(l.Name)
}
b.WriteByte('=')
b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value))
i++

View file

@ -815,3 +815,8 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) {
ls.syms = b.syms.nameTable
ls.data = yoloString(b.overwriteBuffer)
}
// SizeOfLabels returns the approximate space required for n copies of a label.
func SizeOfLabels(name, value string, n uint64) uint64 {
return uint64(len(name)+len(value)) + n*4 // Assuming most symbol-table entries are 2 bytes long.
}
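Note the deliberate asymmetry between builds: this variant charges the string payload once plus roughly four bytes of symbol references per copy, while the plain build charges headers and payload for every copy. Both are order-of-magnitude estimates rather than exact accounting.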

View file

@ -691,3 +691,8 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder {
func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) {
// no-op
}
// SizeOfLabels returns the approximate space required for n copies of a label.
func SizeOfLabels(name, value string, n uint64) uint64 {
return uint64(labelSize(&Label{Name: name, Value: value})) * n
}

View file

@ -39,6 +39,10 @@ func TestLabels_String(t *testing.T) {
labels: Labels{},
expected: "{}",
},
{
labels: FromStrings("service.name", "t1", "whatever\\whatever", "t2"),
expected: `{"service.name"="t1", "whatever\\whatever"="t2"}`,
},
}
for _, c := range cases {
str := c.labels.String()

View file

@ -337,7 +337,7 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
}
// All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds.
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#timestamps
ct := int64(p.val * 1000.0)
p.setCTParseValues(ct, currHash, currName, true)
return &ct

View file

@ -436,6 +436,8 @@ func NewEngine(opts EngineOpts) *Engine {
}
// Close closes ng.
// Callers must ensure the engine is really no longer in use before calling this to avoid
// failures like the one in https://github.com/prometheus/prometheus/issues/15232.
func (ng *Engine) Close() error {
if ng == nil {
return nil
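A hedged sketch of the shutdown ordering the new comment asks for; opts and the query lifecycle are assumed to be handled elsewhere:

eng := promql.NewEngine(opts)
// ... run queries and wait for every one of them to return ...
if err := eng.Close(); err != nil {
	log.Printf("error closing PromQL engine: %v", err) // log import assumed
}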
@ -1352,7 +1354,7 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
}
groups := make([]groupedAggregation, groupCount)
var k int
var k int64
var ratio float64
var seriess map[uint64]Series
switch aggExpr.Op {
@ -1360,9 +1362,9 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate
if !convertibleToInt64(param) {
ev.errorf("Scalar value %v overflows int64", param)
}
k = int(param)
if k > len(inputMatrix) {
k = len(inputMatrix)
k = int64(param)
if k > int64(len(inputMatrix)) {
k = int64(len(inputMatrix))
}
if k < 1 {
return nil, warnings
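Widening k from int to int64, together with the existing convertibleToInt64 guard, appears aimed at keeping very large parameters such as topk(5e9, m) well-defined on 32-bit platforms, where int is only 32 bits; the clamp to len(inputMatrix) then bounds the heap size either way.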
@ -3172,7 +3174,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix
// seriesToResult maps inputMatrix indexes to group indexes.
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk, or without any order for limitk / limit_ratio.
// For a range query, aggregates output in the seriess map.
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int64, r float64, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
op := e.Op
var s Sample
var annos annotations.Annotations
@ -3243,7 +3245,7 @@ seriesLoop:
case s.H != nil:
// Ignore histogram sample and add info annotation.
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("topk", e.PosRange))
case len(group.heap) < k:
case int64(len(group.heap)) < k:
heap.Push(&group.heap, &s)
case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
// This new element is bigger than the previous smallest element - overwrite that.
@ -3259,7 +3261,7 @@ seriesLoop:
case s.H != nil:
// Ignore histogram sample and add info annotation.
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo("bottomk", e.PosRange))
case len(group.heap) < k:
case int64(len(group.heap)) < k:
heap.Push((*vectorByReverseValueHeap)(&group.heap), &s)
case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
// This new element is smaller than the previous biggest element - overwrite that.
@ -3270,13 +3272,13 @@ seriesLoop:
}
case parser.LIMITK:
if len(group.heap) < k {
if int64(len(group.heap)) < k {
heap.Push(&group.heap, &s)
}
// LIMITK optimization: early break if we've added K elements to _every_ group,
// especially useful for large timeseries where the user is exploring labels via e.g.
// limitk(10, my_metric)
if !group.groupAggrComplete && len(group.heap) == k {
if !group.groupAggrComplete && int64(len(group.heap)) == k {
group.groupAggrComplete = true
groupsRemaining--
if groupsRemaining == 0 {

View file

@ -751,7 +751,7 @@ load 10s
Interval: 5 * time.Second,
},
{
Query: `count_values("wrong label!", metric)`,
Query: `count_values("wrong label!\xff", metric)`,
ShouldError: true,
},
}

View file

@ -345,14 +345,17 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 {
return x + y
}
// Holt-Winters is similar to a weighted moving average, where historical data has exponentially less influence on the current data.
// Holt-Winter also accounts for trends in data. The smoothing factor (0 < sf < 1) affects how historical data will affect the current
// data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects
// how trends in historical data will affect the current data. A higher trend factor increases the influence.
// of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing".
// Double exponential smoothing is similar to a weighted moving average, where
// historical data has exponentially less influence on the current data. It also
// accounts for trends in data. The smoothing factor (0 < sf < 1) affects how
// historical data will affect the current data. A lower smoothing factor
// increases the influence of historical data. The trend factor (0 < tf < 1)
// affects how trends in historical data will affect the current data. A higher
// trend factor increases the influence of trends. Algorithm taken from
// https://en.wikipedia.org/wiki/Exponential_smoothing.
func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := vals[0].(Matrix)[0]
metricName := samples.Metric.Get(labels.MetricName)
// The smoothing factor argument.
sf := vals[1].(Vector)[0].F
@ -371,6 +374,10 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions
// Can't do the smoothing operation with fewer than two points.
if l < 2 {
// Annotate mix of float and histogram.
if l == 1 && len(samples.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return enh.Out, nil
}
@ -391,7 +398,9 @@ func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions
s0, s1 = s1, x+y
}
if len(samples.Histograms) > 0 {
return append(enh.Out, Sample{F: s1}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return append(enh.Out, Sample{F: s1}), nil
}
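Standalone, the algorithm the rewritten comment describes looks roughly like this; a sketch over plain float64 samples, not the engine's implementation (which also threads annotations and histogram handling):

// doubleExpSmooth returns the final smoothed value of vals.
// sf is the smoothing factor, tf the trend factor; both in (0, 1).
// Assumes an import of math.
func doubleExpSmooth(vals []float64, sf, tf float64) float64 {
	if len(vals) < 2 {
		return math.NaN() // mirrors the two-point guard above
	}
	s := vals[1]           // level
	b := vals[1] - vals[0] // initial trend
	for i := 2; i < len(vals); i++ {
		prev := s
		s = sf*vals[i] + (1-sf)*(prev+b) // blend sample with projected level
		b = tf*(s-prev) + (1-tf)*b       // blend new trend with old
	}
	return s
}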
@ -1107,10 +1116,15 @@ func linearRegression(samples []FPoint, interceptTime int64) (slope, intercept f
// === deriv(node parser.ValueTypeMatrix) (Vector, Annotations) ===
func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := vals[0].(Matrix)[0]
metricName := samples.Metric.Get(labels.MetricName)
// No sense in trying to compute a derivative without at least two points.
// No sense in trying to compute a derivative without at least two float points.
// Drop this Vector element.
if len(samples.Floats) < 2 {
// Annotate mix of float and histogram.
if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return enh.Out, nil
}
@ -1118,6 +1132,9 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
// to avoid floating point accuracy issues, see
// https://github.com/prometheus/prometheus/issues/2674
slope, _ := linearRegression(samples.Floats, samples.Floats[0].T)
if len(samples.Histograms) > 0 {
return append(enh.Out, Sample{F: slope}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return append(enh.Out, Sample{F: slope}), nil
}
@ -1125,13 +1142,22 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper
func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
samples := vals[0].(Matrix)[0]
duration := vals[1].(Vector)[0].F
// No sense in trying to predict anything without at least two points.
metricName := samples.Metric.Get(labels.MetricName)
// No sense in trying to predict anything without at least two float points.
// Drop this Vector element.
if len(samples.Floats) < 2 {
// Annotate mix of float and histogram.
if len(samples.Floats) == 1 && len(samples.Histograms) > 0 {
return enh.Out, annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return enh.Out, nil
}
slope, intercept := linearRegression(samples.Floats, enh.Ts)
if len(samples.Histograms) > 0 {
return append(enh.Out, Sample{F: slope*duration + intercept}), annotations.New().Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
}
return append(enh.Out, Sample{F: slope*duration + intercept}), nil
}

View file

@ -669,14 +669,14 @@ label_set_item : IDENTIFIER EQL STRING
{ $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } }
| string_identifier EQL STRING
{ $$ = labels.Label{Name: $1.Val, Value: yylex.(*parser).unquoteString($3.Val) } }
| string_identifier
{ $$ = labels.Label{Name: labels.MetricName, Value: $1.Val} }
| IDENTIFIER EQL error
{ yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}}
| string_identifier EQL error
{ yylex.(*parser).unexpected("label set", "string"); $$ = labels.Label{}}
| IDENTIFIER error
{ yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}}
| string_identifier error
{ yylex.(*parser).unexpected("label set", "\"=\""); $$ = labels.Label{}}
| error
{ yylex.(*parser).unexpected("label set", "identifier or \"}\""); $$ = labels.Label{} }
;

View file

@ -339,7 +339,7 @@ var yyExca = [...]int16{
79, 197,
85, 197,
-2, 125,
-1, 205,
-1, 204,
9, 246,
12, 246,
13, 246,
@ -371,7 +371,7 @@ var yyExca = [...]int16{
88, 246,
89, 246,
-2, 0,
-1, 206,
-1, 205,
9, 246,
12, 246,
13, 246,
@ -407,139 +407,139 @@ var yyExca = [...]int16{
const yyPrivate = 57344
const yyLast = 804
const yyLast = 803
var yyAct = [...]int16{
155, 339, 337, 158, 344, 231, 39, 197, 281, 44,
296, 295, 84, 120, 82, 181, 109, 108, 351, 352,
353, 354, 107, 111, 203, 136, 204, 159, 154, 112,
205, 206, 234, 6, 271, 55, 163, 163, 107, 334,
333, 307, 244, 275, 309, 54, 162, 162, 250, 363,
91, 272, 330, 131, 362, 233, 60, 270, 276, 110,
100, 101, 298, 115, 103, 116, 106, 90, 164, 164,
114, 265, 113, 361, 277, 307, 360, 246, 247, 338,
103, 248, 106, 153, 165, 165, 264, 316, 201, 261,
122, 105, 235, 237, 239, 240, 241, 249, 251, 254,
255, 256, 257, 258, 262, 263, 273, 105, 236, 238,
242, 243, 245, 252, 253, 152, 117, 166, 259, 260,
176, 164, 170, 173, 163, 168, 223, 169, 172, 2,
3, 4, 5, 107, 162, 199, 111, 165, 187, 202,
189, 171, 112, 269, 207, 208, 209, 210, 211, 212,
213, 214, 215, 216, 217, 218, 219, 220, 221, 200,
89, 91, 113, 222, 123, 193, 268, 329, 224, 225,
183, 100, 101, 191, 121, 103, 104, 106, 90, 7,
85, 234, 266, 182, 55, 183, 328, 86, 192, 123,
83, 244, 122, 267, 54, 132, 190, 250, 188, 121,
345, 230, 105, 86, 233, 77, 35, 119, 304, 10,
185, 327, 86, 303, 293, 294, 157, 315, 297, 79,
184, 186, 326, 163, 274, 185, 246, 247, 302, 325,
248, 324, 314, 162, 323, 184, 186, 299, 261, 313,
322, 235, 237, 239, 240, 241, 249, 251, 254, 255,
256, 257, 258, 262, 263, 164, 321, 236, 238, 242,
243, 245, 252, 253, 180, 126, 320, 259, 260, 179,
125, 165, 305, 319, 306, 308, 318, 310, 317, 130,
88, 129, 178, 124, 311, 312, 137, 138, 139, 140,
154, 338, 336, 157, 343, 230, 39, 196, 280, 44,
295, 294, 84, 120, 82, 233, 180, 109, 108, 350,
351, 352, 353, 110, 111, 243, 202, 158, 203, 135,
112, 249, 361, 6, 333, 329, 113, 332, 232, 204,
205, 308, 271, 60, 130, 270, 297, 268, 162, 315,
156, 360, 153, 306, 359, 344, 200, 162, 161, 55,
245, 246, 222, 115, 247, 116, 107, 161, 269, 54,
267, 114, 260, 306, 182, 234, 236, 238, 239, 240,
248, 250, 253, 254, 255, 256, 257, 261, 262, 163,
122, 235, 237, 241, 242, 244, 251, 252, 192, 328,
111, 258, 259, 117, 190, 164, 112, 152, 103, 55,
106, 337, 77, 113, 184, 151, 35, 165, 327, 54,
175, 191, 169, 172, 183, 185, 167, 189, 168, 2,
3, 4, 5, 107, 198, 105, 159, 160, 201, 186,
188, 7, 326, 206, 207, 208, 209, 210, 211, 212,
213, 214, 215, 216, 217, 218, 219, 220, 199, 194,
89, 91, 221, 162, 264, 325, 197, 223, 224, 171,
200, 100, 101, 161, 162, 103, 104, 106, 90, 263,
233, 324, 170, 162, 161, 323, 362, 322, 321, 274,
243, 122, 266, 161, 131, 163, 249, 272, 123, 320,
229, 319, 105, 232, 275, 318, 163, 317, 121, 85,
316, 164, 163, 292, 293, 163, 265, 296, 129, 83,
276, 86, 164, 273, 10, 245, 246, 187, 164, 247,
88, 164, 86, 50, 79, 36, 298, 260, 1, 78,
234, 236, 238, 239, 240, 248, 250, 253, 254, 255,
256, 257, 261, 262, 123, 49, 235, 237, 241, 242,
244, 251, 252, 181, 121, 182, 258, 259, 128, 48,
127, 304, 119, 305, 307, 59, 309, 86, 9, 9,
47, 46, 134, 310, 311, 136, 137, 138, 139, 140,
141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
151, 195, 160, 161, 50, 163, 36, 167, 198, 331,
78, 332, 201, 228, 55, 162, 85, 227, 1, 340,
341, 342, 336, 49, 54, 343, 83, 347, 346, 349,
348, 48, 226, 47, 81, 355, 356, 164, 55, 86,
357, 53, 77, 301, 56, 8, 359, 22, 54, 37,
55, 175, 46, 165, 57, 128, 135, 127, 45, 43,
54, 364, 300, 59, 133, 174, 9, 9, 42, 134,
75, 41, 40, 51, 196, 358, 18, 19, 278, 87,
20, 194, 229, 80, 350, 156, 76, 58, 232, 52,
118, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 0, 0, 0, 13, 0,
0, 0, 24, 0, 30, 0, 0, 31, 32, 55,
38, 0, 53, 77, 0, 56, 280, 0, 22, 54,
0, 0, 0, 279, 0, 57, 0, 283, 284, 282,
289, 291, 288, 290, 285, 286, 287, 292, 0, 0,
0, 75, 0, 0, 0, 0, 0, 18, 19, 0,
0, 20, 0, 0, 0, 0, 0, 76, 0, 0,
0, 0, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 0, 0, 0, 13,
0, 0, 0, 24, 0, 30, 0, 55, 31, 32,
53, 77, 0, 56, 335, 0, 22, 54, 0, 0,
0, 0, 0, 57, 0, 283, 284, 282, 289, 291,
288, 290, 285, 286, 287, 292, 0, 0, 0, 75,
0, 0, 0, 0, 0, 18, 19, 0, 0, 20,
0, 0, 0, 17, 77, 76, 0, 0, 0, 22,
45, 43, 132, 173, 179, 184, 166, 85, 330, 178,
331, 42, 133, 55, 41, 183, 185, 83, 339, 340,
341, 335, 177, 54, 342, 81, 346, 345, 348, 347,
86, 303, 40, 314, 354, 355, 302, 55, 51, 356,
53, 77, 300, 56, 195, 358, 22, 54, 313, 55,
174, 301, 227, 57, 8, 312, 226, 357, 37, 54,
363, 299, 126, 277, 87, 193, 228, 125, 80, 75,
349, 225, 155, 58, 231, 18, 19, 52, 118, 20,
124, 0, 0, 0, 0, 76, 0, 0, 0, 0,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 0, 0, 0, 13, 0, 0,
0, 24, 0, 30, 0, 0, 31, 32, 18, 19,
0, 0, 20, 0, 0, 0, 17, 35, 0, 0,
0, 0, 22, 11, 12, 14, 15, 16, 21, 23,
25, 26, 27, 28, 29, 33, 34, 0, 0, 0,
13, 0, 0, 0, 24, 0, 30, 0, 0, 31,
32, 18, 19, 0, 0, 20, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 11, 12, 14, 15,
16, 21, 23, 25, 26, 27, 28, 29, 33, 34,
107, 0, 0, 13, 0, 0, 0, 24, 177, 30,
0, 0, 31, 32, 0, 0, 0, 0, 0, 107,
0, 0, 0, 0, 0, 0, 0, 89, 91, 92,
0, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 0, 103, 104, 106, 90, 89, 91, 92, 0,
0, 24, 0, 30, 0, 0, 31, 32, 55, 38,
107, 53, 77, 0, 56, 279, 0, 22, 54, 0,
0, 0, 278, 0, 57, 0, 282, 283, 281, 288,
290, 287, 289, 284, 285, 286, 291, 0, 91, 0,
75, 0, 0, 0, 0, 0, 18, 19, 100, 101,
20, 0, 103, 0, 106, 90, 76, 0, 0, 0,
0, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 0, 0, 0, 13, 105,
0, 0, 24, 0, 30, 0, 55, 31, 32, 53,
77, 0, 56, 334, 0, 22, 54, 0, 0, 0,
0, 0, 57, 0, 282, 283, 281, 288, 290, 287,
289, 284, 285, 286, 291, 0, 0, 0, 75, 0,
0, 0, 0, 0, 18, 19, 0, 0, 20, 0,
0, 0, 17, 77, 76, 0, 0, 0, 22, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 0, 0, 0, 13, 0, 0, 0,
24, 0, 30, 0, 0, 31, 32, 18, 19, 0,
0, 20, 0, 0, 0, 17, 35, 0, 0, 0,
0, 22, 11, 12, 14, 15, 16, 21, 23, 25,
26, 27, 28, 29, 33, 34, 0, 0, 0, 13,
0, 0, 0, 24, 0, 30, 0, 0, 31, 32,
18, 19, 0, 0, 20, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 11, 12, 14, 15, 16,
21, 23, 25, 26, 27, 28, 29, 33, 34, 107,
0, 0, 13, 0, 0, 0, 24, 176, 30, 0,
0, 31, 32, 0, 0, 0, 0, 0, 107, 0,
0, 0, 0, 0, 0, 0, 89, 91, 92, 0,
93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
0, 103, 104, 106, 90, 107, 0, 0, 0, 105,
0, 103, 104, 106, 90, 89, 91, 92, 0, 93,
94, 95, 96, 97, 98, 99, 100, 101, 102, 0,
103, 104, 106, 90, 107, 0, 0, 0, 105, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 107, 0, 0, 0, 105, 0,
0, 0, 89, 91, 92, 0, 93, 94, 95, 0,
97, 98, 99, 100, 101, 102, 0, 103, 104, 106,
90, 89, 91, 92, 0, 93, 94, 0, 0, 97,
98, 0, 100, 101, 102, 0, 103, 104, 106, 90,
0, 0, 0, 0, 105, 0, 0, 0, 0, 0,
0, 0, 0, 107, 0, 0, 0, 105, 0, 0,
0, 89, 91, 92, 0, 93, 94, 95, 0, 97,
98, 99, 100, 101, 102, 0, 103, 104, 106, 90,
89, 91, 92, 0, 93, 94, 0, 0, 97, 98,
0, 100, 101, 102, 0, 103, 104, 106, 90, 0,
0, 0, 0, 105, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 105,
0, 0, 105,
}
var yyPact = [...]int16{
31, 169, 574, 574, 410, 531, -1000, -1000, -1000, 193,
31, 131, 573, 573, 409, 530, -1000, -1000, -1000, 103,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 314, -1000, 278, -1000, 655,
-1000, -1000, -1000, -1000, -1000, 305, -1000, 228, -1000, 654,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 57, 147, -1000, -1000, 488, -1000, 488, 192,
-1000, -1000, 21, 98, -1000, -1000, 487, -1000, 487, 99,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 187, -1000, -1000,
263, -1000, -1000, 353, 277, -1000, -1000, 29, -1000, -53,
-53, -53, -53, -53, -53, -53, -53, -53, -53, -53,
-53, -53, -53, -53, -53, 26, 214, 305, 147, -56,
-1000, 126, 126, 329, -1000, 636, 24, -1000, 262, -1000,
-1000, 181, 166, -1000, -1000, 178, -1000, 171, -1000, 163,
-1000, 296, 488, -1000, -58, -50, -1000, 488, 488, 488,
488, 488, 488, 488, 488, 488, 488, 488, 488, 488,
488, 488, -1000, 175, -1000, -1000, 111, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, 115, 115, 311, -1000, -1000, -1000,
-1000, 179, -1000, -1000, 64, -1000, 655, -1000, -1000, 162,
-1000, 141, -1000, -1000, -1000, -1000, -1000, 32, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 25, 80, 17, -1000, -1000,
-1000, 409, 8, 126, 126, 126, 126, 24, 24, 119,
119, 119, 720, 701, 119, 119, 720, 24, 24, 119,
24, 8, -1000, 40, -1000, -1000, -1000, 341, -1000, 206,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 252, -1000, -1000,
360, -1000, -1000, 266, 214, -1000, -1000, 20, -1000, -49,
-49, -49, -49, -49, -49, -49, -49, -49, -49, -49,
-49, -49, -49, -49, -49, 50, 48, 304, 98, -55,
-1000, 167, 167, 328, -1000, 635, 52, -1000, 302, -1000,
-1000, 261, 70, -1000, -1000, 207, -1000, 102, -1000, 96,
154, 487, -1000, -56, -41, -1000, 487, 487, 487, 487,
487, 487, 487, 487, 487, 487, 487, 487, 487, 487,
487, -1000, 100, -1000, -1000, 47, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, 39, 39, 350, -1000, -1000, -1000, -1000,
178, -1000, -1000, 157, -1000, 654, -1000, -1000, 196, -1000,
45, -1000, -1000, -1000, -1000, -1000, 43, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, 16, 171, 163, -1000, -1000, -1000,
408, 406, 167, 167, 167, 167, 52, 52, 119, 119,
119, 719, 700, 119, 119, 719, 52, 52, 119, 52,
406, -1000, 24, -1000, -1000, -1000, 340, -1000, 329, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 488, -1000, -1000, -1000, -1000,
-1000, -1000, 56, 56, 18, 56, 72, 72, 215, 70,
-1000, -1000, 272, 270, 267, 260, 250, 234, 228, 225,
223, 216, 205, -1000, -1000, -1000, -1000, -1000, -1000, 165,
-1000, -1000, -1000, 30, -1000, 655, -1000, -1000, -1000, 56,
-1000, 14, 13, 487, -1000, -1000, -1000, 22, 27, 27,
27, 115, 186, 186, 22, 186, 22, -74, -1000, -1000,
-1000, -1000, -1000, 56, 56, -1000, -1000, -1000, 56, -1000,
-1000, -1000, -1000, -1000, -1000, 27, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 52, -1000,
28, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, 487, -1000, -1000, -1000, -1000, -1000,
-1000, 34, 34, 15, 34, 40, 40, 331, 32, -1000,
-1000, 204, 201, 199, 195, 193, 182, 181, 179, 175,
159, 136, -1000, -1000, -1000, -1000, -1000, -1000, 97, -1000,
-1000, -1000, 13, -1000, 654, -1000, -1000, -1000, 34, -1000,
11, 8, 486, -1000, -1000, -1000, 54, 174, 174, 174,
39, 41, 41, 54, 41, 54, -73, -1000, -1000, -1000,
-1000, -1000, 34, 34, -1000, -1000, -1000, 34, -1000, -1000,
-1000, -1000, -1000, -1000, 174, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, -1000, 165,
-1000, -1000, -1000, -1000,
}
var yyPgo = [...]int16{
0, 390, 13, 389, 5, 15, 388, 363, 387, 385,
12, 384, 209, 345, 383, 14, 382, 10, 11, 381,
379, 7, 378, 8, 4, 375, 2, 1, 3, 374,
27, 0, 373, 372, 17, 195, 371, 369, 6, 368,
365, 16, 364, 56, 359, 9, 358, 356, 352, 333,
331, 323, 304, 318, 306,
0, 378, 13, 377, 5, 16, 374, 275, 373, 372,
12, 370, 224, 354, 368, 14, 366, 10, 11, 365,
364, 7, 363, 8, 4, 357, 2, 1, 3, 344,
27, 0, 338, 332, 18, 194, 314, 312, 6, 311,
303, 17, 302, 43, 301, 9, 300, 282, 281, 280,
269, 255, 233, 238, 235,
}
var yyR1 = [...]int8{
@ -584,7 +584,7 @@ var yyR2 = [...]int8{
2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 3, 4, 2, 0,
3, 1, 2, 3, 3, 3, 3, 2, 2, 1,
3, 1, 2, 3, 3, 1, 3, 3, 2, 1,
2, 0, 3, 2, 1, 1, 3, 1, 3, 4,
1, 3, 5, 5, 1, 1, 1, 4, 3, 3,
2, 3, 1, 2, 3, 3, 3, 3, 3, 3,
@ -612,30 +612,30 @@ var yyChk = [...]int16{
52, 53, 54, 56, 57, 83, 58, 14, -34, -41,
2, 79, 85, 15, -41, -38, -38, -43, -1, 20,
-2, 12, -10, 2, 20, 7, 2, 4, 2, 4,
2, 24, -35, -42, -37, -47, 78, -35, -35, -35,
24, -35, -42, -37, -47, 78, -35, -35, -35, -35,
-35, -35, -35, -35, -35, -35, -35, -35, -35, -35,
-35, -35, -45, 57, 2, -31, -9, 2, -28, -30,
88, 89, 19, 9, 41, 57, -45, 2, -41, -34,
-17, 15, 2, -17, -40, 22, -38, 22, 20, 7,
2, -5, 2, 4, 54, 44, 55, -5, 20, -15,
25, 2, 25, 2, -19, 5, -29, -21, 12, -28,
-30, 16, -38, 82, 84, 80, 81, -38, -38, -38,
-35, -45, 57, 2, -31, -9, 2, -28, -30, 88,
89, 19, 9, 41, 57, -45, 2, -41, -34, -17,
15, 2, -17, -40, 22, -38, 22, 20, 7, 2,
-5, 2, 4, 54, 44, 55, -5, 20, -15, 25,
2, 25, 2, -19, 5, -29, -21, 12, -28, -30,
16, -38, 82, 84, 80, 81, -38, -38, -38, -38,
-38, -38, -38, -38, -38, -38, -38, -38, -38, -38,
-38, -38, -45, 15, -28, -28, 21, 6, 2, -16,
22, -4, -6, 25, 2, 62, 78, 63, 79, 64,
65, 66, 80, 81, 12, 82, 47, 48, 51, 67,
18, 68, 83, 84, 69, 70, 71, 72, 73, 88,
89, 59, 74, 75, 22, 7, 20, -2, 25, 2,
25, 2, 26, 26, -30, 26, 41, 57, -22, 24,
17, -23, 30, 28, 29, 35, 36, 37, 33, 31,
34, 32, 38, -17, -17, -18, -17, -18, 22, -45,
21, 2, 22, 7, 2, -38, -27, 19, -27, 26,
-27, -21, -21, 24, 17, 2, 17, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 21, 2,
22, -4, -27, 26, 26, 17, -23, -26, 57, -27,
-31, -31, -31, -28, -24, 14, -24, -26, -24, -26,
-11, 92, 93, 94, 95, -27, -27, -27, -25, -31,
24, 21, 2, 21, -31,
-38, -45, 15, -28, -28, 21, 6, 2, -16, 22,
-4, -6, 25, 2, 62, 78, 63, 79, 64, 65,
66, 80, 81, 12, 82, 47, 48, 51, 67, 18,
68, 83, 84, 69, 70, 71, 72, 73, 88, 89,
59, 74, 75, 22, 7, 20, -2, 25, 2, 25,
2, 26, 26, -30, 26, 41, 57, -22, 24, 17,
-23, 30, 28, 29, 35, 36, 37, 33, 31, 34,
32, 38, -17, -17, -18, -17, -18, 22, -45, 21,
2, 22, 7, 2, -38, -27, 19, -27, 26, -27,
-21, -21, 24, 17, 2, 17, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 21, 2, 22,
-4, -27, 26, 26, 17, -23, -26, 57, -27, -31,
-31, -31, -28, -24, 14, -24, -26, -24, -26, -11,
92, 93, 94, 95, -27, -27, -27, -25, -31, 24,
21, 2, 21, -31,
}
var yyDef = [...]int16{
@ -647,35 +647,35 @@ var yyDef = [...]int16{
18, 19, 0, 108, 233, 234, 0, 244, 0, 85,
86, -2, -2, -2, -2, -2, -2, -2, -2, -2,
-2, -2, -2, -2, -2, 227, 228, 0, 5, 100,
0, 128, 131, 0, 0, 139, 245, 140, 144, 43,
0, 128, 131, 0, 135, 139, 245, 140, 144, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 0, 0, 0, 0, 22,
23, 0, 0, 0, 61, 0, 83, 84, 0, 89,
91, 0, 95, 99, 126, 0, 132, 0, 137, 0,
138, 143, 0, 42, 47, 48, 44, 0, 0, 0,
91, 0, 95, 99, 126, 0, 132, 0, 138, 0,
143, 0, 42, 47, 48, 44, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 68, 0, 70, 71, 0, 73, 239, 240,
74, 75, 235, 236, 0, 0, 0, 82, 20, 21,
24, 0, 54, 25, 0, 63, 65, 67, 87, 0,
92, 0, 98, 229, 230, 231, 232, 0, 127, 130,
133, 135, 134, 136, 142, 145, 147, 150, 154, 155,
156, 0, 26, 0, 0, -2, -2, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 69, 0, 237, 238, 76, 0, 81, 0,
53, 56, 58, 59, 60, 198, 199, 200, 201, 202,
203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
223, 224, 225, 226, 62, 66, 88, 90, 93, 97,
94, 96, 0, 0, 0, 0, 0, 0, 0, 0,
160, 162, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 45, 46, 49, 247, 50, 72, 0,
78, 80, 51, 0, 57, 64, 146, 241, 148, 0,
151, 0, 0, 0, 158, 163, 159, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 77, 79,
52, 55, 149, 0, 0, 157, 161, 164, 0, 243,
165, 166, 167, 168, 169, 0, 170, 171, 172, 173,
174, 180, 181, 182, 183, 152, 153, 242, 0, 178,
0, 176, 179, 175, 177,
0, 68, 0, 70, 71, 0, 73, 239, 240, 74,
75, 235, 236, 0, 0, 0, 82, 20, 21, 24,
0, 54, 25, 0, 63, 65, 67, 87, 0, 92,
0, 98, 229, 230, 231, 232, 0, 127, 130, 133,
136, 134, 137, 142, 145, 147, 150, 154, 155, 156,
0, 26, 0, 0, -2, -2, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 69, 0, 237, 238, 76, 0, 81, 0, 53,
56, 58, 59, 60, 198, 199, 200, 201, 202, 203,
204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 62, 66, 88, 90, 93, 97, 94,
96, 0, 0, 0, 0, 0, 0, 0, 0, 160,
162, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 45, 46, 49, 247, 50, 72, 0, 78,
80, 51, 0, 57, 64, 146, 241, 148, 0, 151,
0, 0, 0, 158, 163, 159, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 77, 79, 52,
55, 149, 0, 0, 157, 161, 164, 0, 243, 165,
166, 167, 168, 169, 0, 170, 171, 172, 173, 174,
180, 181, 182, 183, 152, 153, 242, 0, 178, 0,
176, 179, 175, 177,
}
var yyTok1 = [...]int8{
@ -1623,10 +1623,9 @@ yydefault:
yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)}
}
case 135:
yyDollar = yyS[yypt-3 : yypt+1]
yyDollar = yyS[yypt-1 : yypt+1]
{
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
yyVAL.label = labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}
}
case 136:
yyDollar = yyS[yypt-3 : yypt+1]
@ -1635,9 +1634,9 @@ yydefault:
yyVAL.label = labels.Label{}
}
case 137:
yyDollar = yyS[yypt-2 : yypt+1]
yyDollar = yyS[yypt-3 : yypt+1]
{
yylex.(*parser).unexpected("label set", "\"=\"")
yylex.(*parser).unexpected("label set", "string")
yyVAL.label = labels.Label{}
}
case 138:

View file

@ -244,7 +244,8 @@ type seriesDescription struct {
values []SequenceValue
}
// ParseSeriesDesc parses the description of a time series.
// ParseSeriesDesc parses the description of a time series. It is only used in
// the PromQL testing framework code.
func ParseSeriesDesc(input string) (labels labels.Labels, values []SequenceValue, err error) {
p := NewParser(input)
p.lex.seriesDesc = true
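A usage sketch, with the expansion semantics taken from the tests that follow (0+50x2 expands to 0, 50, 100):

lbls, vals, err := parser.ParseSeriesDesc(`http_requests{job="api-server"} 0+10x2`)
// err  == nil
// lbls => {__name__="http_requests", job="api-server"}
// vals => 0, 10, 20 (start 0, step +10, two further steps)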

View file

@ -2398,7 +2398,7 @@ var testExpr = []struct {
},
},
{
input: `sum by ("foo")({"some.metric"})`,
input: `sum by ("foo bar")({"some.metric"})`,
expected: &AggregateExpr{
Op: SUM,
Expr: &VectorSelector{
@ -2406,14 +2406,14 @@ var testExpr = []struct {
MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"),
},
PosRange: posrange.PositionRange{
Start: 15,
End: 30,
Start: 19,
End: 34,
},
},
Grouping: []string{"foo"},
Grouping: []string{"foo bar"},
PosRange: posrange.PositionRange{
Start: 0,
End: 31,
End: 35,
},
},
},
@ -4023,6 +4023,76 @@ func TestParseExpressions(t *testing.T) {
}
}
func TestParseSeriesDesc(t *testing.T) {
tests := []struct {
name string
input string
expectedLabels labels.Labels
expectedValues []SequenceValue
expectError string
}{
{
name: "empty string",
expectedLabels: labels.EmptyLabels(),
expectedValues: []SequenceValue{},
},
{
name: "simple line",
input: `http_requests{job="api-server", instance="0", group="production"}`,
expectedLabels: labels.FromStrings(
"__name__", "http_requests",
"group", "production",
"instance", "0",
"job", "api-server",
),
expectedValues: []SequenceValue{},
},
{
name: "label name characters that require quoting",
input: `{"http.requests", "service.name"="api-server", instance="0", group="canary"} 0+50x2`,
expectedLabels: labels.FromStrings(
"__name__", "http.requests",
"group", "canary",
"instance", "0",
"service.name", "api-server",
),
expectedValues: []SequenceValue{
{Value: 0, Omitted: false, Histogram: (*histogram.FloatHistogram)(nil)},
{Value: 50, Omitted: false, Histogram: (*histogram.FloatHistogram)(nil)},
{Value: 100, Omitted: false, Histogram: (*histogram.FloatHistogram)(nil)},
},
},
{
name: "confirm failure on junk after identifier",
input: `{"http.requests"xx} 0+50x2`,
expectError: `parse error: unexpected identifier "xx" in label set, expected "," or "}"`,
},
{
name: "confirm failure on bare operator after identifier",
input: `{"http.requests"=, x="y"} 0+50x2`,
expectError: `parse error: unexpected "," in label set, expected string`,
},
{
name: "confirm failure on unterminated string identifier",
input: `{"http.requests} 0+50x2`,
expectError: `parse error: unterminated quoted string`,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
l, v, err := ParseSeriesDesc(tc.input)
if tc.expectError != "" {
require.Contains(t, err.Error(), tc.expectError)
} else {
require.NoError(t, err)
require.True(t, labels.Equal(tc.expectedLabels, l))
require.Equal(t, tc.expectedValues, v)
}
})
}
}
// NaN has no equality. Thus, we need a separate test for it.
func TestNaNExpression(t *testing.T) {
expr, err := ParseExpr("NaN")

View file

@ -14,8 +14,10 @@
package parser
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"time"
@ -91,13 +93,20 @@ func (node *AggregateExpr) getAggOpStr() string {
}
func joinLabels(ss []string) string {
var bytea [1024]byte // On stack to avoid memory allocation while building the output.
b := bytes.NewBuffer(bytea[:0])
for i, s := range ss {
// If the label is already quoted, don't quote it again.
if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(string(model.LabelValue(s))) {
ss[i] = fmt.Sprintf("\"%s\"", s)
if i > 0 {
b.WriteString(", ")
}
if !model.IsValidLegacyMetricName(string(model.LabelValue(s))) {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), s))
} else {
b.WriteString(s)
}
}
return strings.Join(ss, ", ")
return b.String()
}
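With this rule, a grouping such as sum by (job, "service.name") prints as job, "service.name": legacy-valid names stay bare, UTF-8 names are quoted via AppendQuote, and the fixed 1024-byte array keeps typical groupings off the heap.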
func (node *BinaryExpr) returnBool() string {

View file

@ -56,6 +56,10 @@ const (
DefaultMaxSamplesPerQuery = 10000
)
func init() {
model.NameValidationScheme = model.UTF8Validation
}
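Setting UTF8Validation in init applies package-wide, which is what allows the test inputs further down to use metric and label names such as "http.requests" and "service.name" without tripping the legacy name checks.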
type TBRun interface {
testing.TB
Run(string, func(*testing.T)) bool
@ -66,7 +70,7 @@ var testStartTime = time.Unix(0, 0).UTC()
// LoadedStorage returns storage with generated data using the provided load statements.
// Non-load statements will cause test errors.
func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
test, err := newTest(t, input, false)
test, err := newTest(t, input, false, newTestStorage)
require.NoError(t, err)
for _, cmd := range test.cmds {
@ -77,7 +81,7 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
t.Errorf("only 'load' commands accepted, got '%s'", cmd)
}
}
return test.storage
return test.storage.(*teststorage.TestStorage)
}
// NewTestEngine creates a promql.Engine with enablePerStepStats, lookbackDelta and maxSamples, and returns it.
@ -108,6 +112,11 @@ func NewTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine
// RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
RunBuiltinTestsWithStorage(t, engine, newTestStorage)
}
// RunBuiltinTestsWithStorage runs an acceptance test suite against the provided engine and storage.
func RunBuiltinTestsWithStorage(t TBRun, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true
@ -118,24 +127,29 @@ func RunBuiltinTests(t TBRun, engine promql.QueryEngine) {
t.Run(fn, func(t *testing.T) {
content, err := fs.ReadFile(testsFs, fn)
require.NoError(t, err)
RunTest(t, string(content), engine)
RunTestWithStorage(t, string(content), engine, newStorage)
})
}
}
// RunTest parses and runs the test against the provided engine.
func RunTest(t testutil.T, input string, engine promql.QueryEngine) {
require.NoError(t, runTest(t, input, engine, false))
RunTestWithStorage(t, input, engine, newTestStorage)
}
// RunTestWithStorage parses and runs the test against the provided engine and storage.
func RunTestWithStorage(t testutil.T, input string, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage) {
require.NoError(t, runTest(t, input, engine, newStorage, false))
}
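A sketch of plugging in a custom backend; myOpenStorage is a hypothetical constructor returning something that satisfies storage.Storage:

promqltest.RunTestWithStorage(t, script, engine, func(tt testutil.T) storage.Storage {
	return myOpenStorage(tt) // hypothetical constructor
})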
// testTest allows tests to be run in "test-the-test" mode (true for
// testingMode). This is a special mode for testing test code execution itself.
func testTest(t testutil.T, input string, engine promql.QueryEngine) error {
return runTest(t, input, engine, true)
return runTest(t, input, engine, newTestStorage, true)
}
func runTest(t testutil.T, input string, engine promql.QueryEngine, testingMode bool) error {
test, err := newTest(t, input, testingMode)
func runTest(t testutil.T, input string, engine promql.QueryEngine, newStorage func(testutil.T) storage.Storage, testingMode bool) error {
test, err := newTest(t, input, testingMode, newStorage)
// Why do this before checking err? newTest() can create the test storage and then return an error,
// and we want to make sure to clean that up to avoid leaking goroutines.
@ -175,18 +189,20 @@ type test struct {
cmds []testCommand
storage *teststorage.TestStorage
open func(testutil.T) storage.Storage
storage storage.Storage
context context.Context
cancelCtx context.CancelFunc
}
// newTest returns an initialized empty Test.
func newTest(t testutil.T, input string, testingMode bool) (*test, error) {
func newTest(t testutil.T, input string, testingMode bool, newStorage func(testutil.T) storage.Storage) (*test, error) {
test := &test{
T: t,
cmds: []testCommand{},
testingMode: testingMode,
open: newStorage,
}
err := test.parse(input)
test.clear()
@ -194,6 +210,8 @@ func newTest(t testutil.T, input string, testingMode bool) (*test, error) {
return test, err
}
func newTestStorage(t testutil.T) storage.Storage { return teststorage.New(t) }
//go:embed testdata
var testsFs embed.FS
@ -1267,7 +1285,7 @@ func (t *test) clear() {
if t.cancelCtx != nil {
t.cancelCtx()
}
t.storage = teststorage.New(t)
t.storage = t.open(t.T)
t.context, t.cancelCtx = context.WithCancel(context.Background())
}

View file

@ -165,6 +165,8 @@ load 5m
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
{"http.requests", "service.name"="api-server", instance="0", group="canary"} 0+50x10
{"http.requests", "service.name"="api-server", instance="1", group="canary"} 0+60x10
`
testCases := map[string]struct {
@ -176,6 +178,12 @@ load 5m
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
`,
},
"instant query on UTF-8 metric with expected float result": {
input: testData + `
eval instant at 5m sum by ("service.name") ({"http.requests"})
{"service.name"="api-server"} 110
`,
},
"instant query with unexpected float result": {
@ -184,7 +192,7 @@ eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 80
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 80 for {group="canary"} but got 70`,
expectedError: `error in eval sum by (group) (http_requests) (line 10): expected 80 for {group="canary"} but got 70`,
},
"instant query with expected histogram result": {
input: `
@ -230,7 +238,7 @@ eval instant at 0 testmetric
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result, has value 70`,
expectedError: `error in eval sum by (group) (http_requests) (line 10): unexpected metric {group="canary"} in result, has value 70`,
},
"instant query, but result has an unexpected series with a histogram value": {
input: `
@ -248,7 +256,7 @@ eval instant at 5m sum by (group) (http_requests)
{group="canary"} 70
{group="test"} 100
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} with 3: [100.000000] not found`,
expectedError: `error in eval sum by (group) (http_requests) (line 10): expected metric {group="test"} with 3: [100.000000] not found`,
},
"instant query expected to fail, and query fails": {
input: `
@ -334,7 +342,7 @@ eval_ordered instant at 50m sort(http_requests)
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="0", job="api-server"} 300
`,
			expectedError: `error in eval sort(http_requests) (line 10): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`,
},
"instant query with results expected to match provided order, but result has an unexpected series": {
input: testData + `
@ -343,7 +351,7 @@ eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
`,
			expectedError: `error in eval sort(http_requests) (line 10): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`,
},
"instant query with invalid timestamp": {
input: `eval instant at abc123 vector(0)`,
@ -362,7 +370,7 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 80 140
`,
			expectedError: `error in eval sum by (group) (http_requests) (line 10): expected float value at index 1 (t=300000) for {group="canary"} to be 80, but got 70 (result has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points [])`,
},
"range query with expected histogram values": {
input: `
@ -389,7 +397,7 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60 90
{group="canary"} 0 70 140
`,
			expectedError: `error in eval sum by (group) (http_requests) (line 10): expected 4 points for {group="production"}, but query time range cannot return this many points`,
},
"range query with missing point in result": {
input: `
@ -407,14 +415,14 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30
{group="canary"} 0 70 140
`,
			expectedError: `error in eval sum by (group) (http_requests) (line 10): expected 2 float points and 0 histogram points for {group="production"}, but got 3 float points [0 @[0] 30 @[300000] 60 @[600000]] and 0 histogram points []`,
},
"range query, but result has an unexpected series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result, has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points []`,
expectedError: `error in eval sum by (group) (http_requests) (line 10): unexpected metric {group="canary"} in result, has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points []`,
},
"range query, but result is missing a series": {
input: testData + `
@ -423,7 +431,7 @@ eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="canary"} 0 70 140
{group="test"} 0 100 200
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} not found`,
expectedError: `error in eval sum by (group) (http_requests) (line 10): expected metric {group="test"} not found`,
},
"range query expected to fail, and query fails": {
input: `

View file

@ -234,11 +234,25 @@ clear
load 5m
http_requests{path="/foo"} 0 50 100 150 200
http_requests{path="/bar"} 200 150 100 50 0
http_requests_gauge{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}}+{{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}}x5
http_requests_counter{path="/foo"} {{schema:0 sum:0 count:0 buckets:[0 0 0]}}+{{schema:0 sum:1 count:2 buckets:[1 1 1]}}x5
http_requests_mix{path="/foo"} 0 50 100 {{schema:0 sum:0 count:0 buckets:[0 0 0] counter_reset_hint:gauge}} {{schema:0 sum:1 count:2 buckets:[1 1 1] counter_reset_hint:gauge}}
eval instant at 20m delta(http_requests[20m])
{path="/foo"} 200
{path="/bar"} -200
eval instant at 20m delta(http_requests_gauge[20m])
{path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}}
# delta emits warn annotation for non-gauge histogram types.
eval_warn instant at 20m delta(http_requests_counter[20m])
{path="/foo"} {{schema:0 sum:4 count:8 buckets:[4 4 4]}}
# delta emits warn annotation for a mix of histograms and floats.
eval_warn instant at 20m delta(http_requests_mix[20m])
#empty
clear
# Tests for idelta().
@ -256,6 +270,10 @@ clear
load 5m
testcounter_reset_middle_total 0+10x4 0+10x5
http_requests_total{job="app-server", instance="1", group="canary"} 0+80x10
testcounter_reset_middle_mix 0+10x4 0+10x5 {{schema:0 sum:1 count:1}} {{schema:1 sum:2 count:2}}
http_requests_mix{job="app-server", instance="1", group="canary"} 0+80x10 {{schema:0 sum:1 count:1}}
http_requests_histogram{job="app-server", instance="1", group="canary"} {{schema:0 sum:1 count:2}}x10
http_requests_inf{job="app-server", instance="1", group="canary"} -Inf 0+80x10 Inf
# deriv should return the same as rate in simple cases.
eval instant at 50m rate(http_requests_total{group="canary", instance="1", job="app-server"}[50m])
@ -268,6 +286,21 @@ eval instant at 50m deriv(http_requests_total{group="canary", instance="1", job=
eval instant at 50m deriv(testcounter_reset_middle_total[100m])
{} 0.010606060606060607
# deriv should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
eval_info instant at 110m deriv(http_requests_mix{group="canary", instance="1", job="app-server"}[110m])
{group="canary", instance="1", job="app-server"} 0.26666666666666666
eval_info instant at 100m deriv(testcounter_reset_middle_mix[110m])
{} 0.010606060606060607
# deriv should silently ignore ranges consisting only of histograms.
eval instant at 50m deriv(http_requests_histogram[60m])
#empty
# deriv should return NaN when +Inf or -Inf is found in the range.
eval instant at 100m deriv(http_requests_inf[100m])
{job="app-server", instance="1", group="canary"} NaN
# predict_linear should return the correct result.
# X/s = [ 0, 300, 600, 900,1200,1500,1800,2100,2400,2700,3000]
# Y = [ 0, 10, 20, 30, 40, 0, 10, 20, 30, 40, 50]
@ -303,6 +336,21 @@ eval instant at 10m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3
eval instant at 70m predict_linear(testcounter_reset_middle_total[55m] @ 3000, 3600)
{} 89.54545454545455
# predict_linear should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 3000)
{} 70
eval_info instant at 60m predict_linear(testcounter_reset_middle_mix[60m], 50m)
{} 70
# predict_linear should silently ignore ranges consisting only of histograms.
eval instant at 60m predict_linear(http_requests_histogram[60m], 50m)
#empty
# predict_linear should return NaN when +Inf or -Inf is found in the range.
eval instant at 100m predict_linear(http_requests_inf[100m], 6000)
{job="app-server", instance="1", group="canary"} NaN
# With http_requests_total, there is a sample value exactly at the end of
# the range, and it has exactly the predicted value, so predict_linear
# can be emulated with deriv.
@ -706,6 +754,11 @@ load 10s
http_requests{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000
http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000
http_requests{job="api-server", instance="1", group="canary"} 0+40x2000
http_requests_mix{job="api-server", instance="0", group="production"} 0+10x1000 100+30x1000 {{schema:0 count:1 sum:2}}x1000
http_requests_mix{job="api-server", instance="1", group="production"} 0+20x1000 200+30x1000 {{schema:0 count:1 sum:2}}x1000
http_requests_mix{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 {{schema:0 count:1 sum:2}}x1000
http_requests_mix{job="api-server", instance="1", group="canary"} 0+40x2000 {{schema:0 count:1 sum:2}}x1000
http_requests_histogram{job="api-server", instance="1", group="canary"} {{schema:0 count:1 sum:2}}x1000
eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
{job="api-server", instance="0", group="production"} 8000
@ -713,6 +766,17 @@ eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1)
{job="api-server", instance="0", group="canary"} 24000
{job="api-server", instance="1", group="canary"} 32000
# double_exponential_smoothing should ignore histograms in a mixed range of floats and histograms, flagged by an info annotation.
eval_info instant at 20010s double_exponential_smoothing(http_requests_mix[1m], 0.01, 0.1)
{job="api-server", instance="0", group="production"} 30100
{job="api-server", instance="1", group="production"} 30200
{job="api-server", instance="0", group="canary"} 80300
{job="api-server", instance="1", group="canary"} 80000
# double_exponential_smoothing should silently ignore ranges consisting only of histograms.
eval instant at 10000s double_exponential_smoothing(http_requests_histogram[1m], 0.01, 0.1)
#empty
# negative trends
clear
load 10s
@ -1110,11 +1174,16 @@ clear
# Don't return anything when there's something there.
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests_histogram{job="api-server", instance="0", group="production"} {{schema:0 sum:1 count:1}}x11
eval instant at 50m absent(http_requests)
eval instant at 50m absent(sum(http_requests))
eval instant at 50m absent(http_requests_histogram)
eval instant at 50m absent(sum(http_requests_histogram))
clear
eval instant at 50m absent(sum(nonexistent{job="testjob", instance="testinstance"}))
@ -1162,6 +1231,7 @@ load 1m
httpd_handshake_failures_total{instance="127.0.0.1",job="node"} 1+1x15
httpd_log_lines_total{instance="127.0.0.1",job="node"} 1
ssl_certificate_expiry_seconds{job="ingress"} NaN NaN NaN NaN NaN
http_requests_histogram{path="/foo",instance="127.0.0.1",job="httpd"} {{schema:0 sum:1 count:1}}x11
eval instant at 5m absent_over_time(http_requests_total[5m])
@ -1205,6 +1275,16 @@ eval instant at 5m absent_over_time({job="ingress"}[4m])
eval instant at 10m absent_over_time({job="ingress"}[4m])
{job="ingress"} 1
eval instant at 10m absent_over_time(http_requests_histogram[5m])
eval instant at 10m absent_over_time(rate(http_requests_histogram[5m])[5m:1m])
eval instant at 20m absent_over_time(http_requests_histogram[5m])
{} 1
eval instant at 20m absent_over_time(rate(http_requests_histogram[5m])[5m:1m])
{} 1
clear
# Testdata for present_over_time()

View file

@ -0,0 +1,18 @@
groups:
- name: indeterminate
rules:
# This shouldn't run in parallel because of the open matcher
- record: job:http_requests:rate1m
expr: sum by (job)(rate(http_requests_total[1m]))
- record: job:http_requests:rate5m
expr: sum by (job)(rate(http_requests_total[5m]))
- record: job:http_requests:rate15m
expr: sum by (job)(rate(http_requests_total[15m]))
- record: job:http_requests:rate30m
expr: sum by (job)(rate(http_requests_total[30m]))
- record: job:http_requests:rate1h
expr: sum by (job)(rate(http_requests_total[1h]))
- record: job:http_requests:rate2h
expr: sum by (job)(rate(http_requests_total[2h]))
- record: matcher
expr: '{job="job1"}'

View file

@ -23,6 +23,9 @@ import (
"sync"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/promql/parser"
@ -30,9 +33,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
@ -44,19 +44,20 @@ import (
// Group is a set of rules that have a logical relation.
type Group struct {
	name                  string
	file                  string
	interval              time.Duration
	queryOffset           *time.Duration
	limit                 int
	rules                 []Rule
	seriesInPreviousEval  []map[string]labels.Labels // One per Rule.
	staleSeries           []labels.Labels
	opts                  *ManagerOptions
	mtx                   sync.Mutex
	evaluationTime        time.Duration // Time it took to evaluate the group.
	evaluationRuleTimeSum time.Duration // Sum of time it took to evaluate each rule in the group.
	lastEvaluation        time.Time     // Wall-clock time of most recent evaluation.
	lastEvalTimestamp     time.Time     // Time slot used for most recent evaluation.
shouldRestore bool
@ -115,6 +116,7 @@ func NewGroup(o GroupOptions) *Group {
metrics.EvalFailures.WithLabelValues(key)
metrics.GroupLastEvalTime.WithLabelValues(key)
metrics.GroupLastDuration.WithLabelValues(key)
metrics.GroupLastRuleDurationSum.WithLabelValues(key)
metrics.GroupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
metrics.GroupSamples.WithLabelValues(key)
metrics.GroupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
@ -370,6 +372,28 @@ func (g *Group) setEvaluationTime(dur time.Duration) {
g.evaluationTime = dur
}
// GetRuleEvaluationTimeSum returns the sum of the time it took to evaluate each rule in the group irrespective of concurrency.
func (g *Group) GetRuleEvaluationTimeSum() time.Duration {
g.mtx.Lock()
defer g.mtx.Unlock()
return g.evaluationRuleTimeSum
}
// updateRuleEvaluationTimeSum updates evaluationRuleTimeSum, which is the sum of the time it took to evaluate each rule in the group irrespective of concurrency.
// It collects the times from the rules themselves.
func (g *Group) updateRuleEvaluationTimeSum() {
var sum time.Duration
for _, rule := range g.rules {
sum += rule.GetEvaluationDuration()
}
g.metrics.GroupLastRuleDurationSum.WithLabelValues(GroupKey(g.file, g.name)).Set(sum.Seconds())
g.mtx.Lock()
defer g.mtx.Unlock()
g.evaluationRuleTimeSum = sum
}
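
Since the per-rule durations are summed irrespective of overlap, comparing the sum with the group's wall-clock time reveals whether concurrency helped. A small illustrative sketch of that relationship (the invariant asserted by the tests later in this diff):

	// Sequential evaluation: wall-clock group time >= sum of rule times.
	// Concurrent evaluation: the sum can exceed the wall-clock group time.
	if g.GetEvaluationTime() < g.GetRuleEvaluationTimeSum() {
		// Rules overlapped in time, so concurrency reduced wall-clock duration.
	}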
// GetLastEvaluation returns the time the last evaluation of the rule group took place.
func (g *Group) GetLastEvaluation() time.Time {
g.mtx.Lock()
@ -487,149 +511,147 @@ func (g *Group) CopyState(from *Group) {
// Rules can be evaluated concurrently if the `concurrent-rule-eval` feature flag is enabled.
func (g *Group) Eval(ctx context.Context, ts time.Time) {
var (
		samplesTotal    atomic.Float64
		ruleQueryOffset = g.QueryOffset()
	)
	eval := func(i int, rule Rule, cleanup func()) {
		if cleanup != nil {
			defer cleanup()
		}

		logger := g.logger.With("name", rule.Name(), "index", i)
		ctx, sp := otel.Tracer("").Start(ctx, "rule")
		sp.SetAttributes(attribute.String("name", rule.Name()))
		defer func(t time.Time) {
			sp.End()

			since := time.Since(t)
			g.metrics.EvalDuration.Observe(since.Seconds())
			rule.SetEvaluationDuration(since)
			rule.SetEvaluationTimestamp(t)
		}(time.Now())

		if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() {
			logger = logger.With("trace_id", sp.SpanContext().TraceID())
		}

		g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

		vector, err := rule.Eval(ctx, ruleQueryOffset, ts, g.opts.QueryFunc, g.opts.ExternalURL, g.Limit())
		if err != nil {
			rule.SetHealth(HealthBad)
			rule.SetLastError(err)
			sp.SetStatus(codes.Error, err.Error())
			g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

			// Canceled queries are intentional termination of queries. This normally
			// happens on shutdown and thus we skip logging of any errors here.
			var eqc promql.ErrQueryCanceled
			if !errors.As(err, &eqc) {
				logger.Warn("Evaluating rule failed", "rule", rule, "err", err)
			}
			return
		}
		rule.SetHealth(HealthGood)
		rule.SetLastError(nil)
		samplesTotal.Add(float64(len(vector)))

		if ar, ok := rule.(*AlertingRule); ok {
			ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
		}
		var (
			numOutOfOrder = 0
			numTooOld     = 0
			numDuplicates = 0
		)

		app := g.opts.Appendable.Appender(ctx)
		seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
		defer func() {
			if err := app.Commit(); err != nil {
				rule.SetHealth(HealthBad)
				rule.SetLastError(err)
				sp.SetStatus(codes.Error, err.Error())
				g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()

				logger.Warn("Rule sample appending failed", "err", err)
				return
			}
			g.seriesInPreviousEval[i] = seriesReturned
		}()

		for _, s := range vector {
			if s.H != nil {
				_, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H)
			} else {
				app.SetOptions(g.appOpts)
				_, err = app.Append(0, s.Metric, s.T, s.F)
			}

			if err != nil {
				rule.SetHealth(HealthBad)
				rule.SetLastError(err)
				sp.SetStatus(codes.Error, err.Error())
				unwrappedErr := errors.Unwrap(err)
				if unwrappedErr == nil {
					unwrappedErr = err
				}
				switch {
				case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
					numOutOfOrder++
					logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
				case errors.Is(unwrappedErr, storage.ErrTooOldSample):
					numTooOld++
					logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
				case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
					numDuplicates++
					logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
				default:
					logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
				}
			} else {
				buf := [1024]byte{}
				seriesReturned[string(s.Metric.Bytes(buf[:]))] = s.Metric
			}
		}
		if numOutOfOrder > 0 {
			logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder)
		}
		if numTooOld > 0 {
			logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld)
		}
		if numDuplicates > 0 {
			logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates)
		}

		for metric, lset := range g.seriesInPreviousEval[i] {
			if _, ok := seriesReturned[metric]; !ok {
				// Series no longer exposed, mark it stale.
				_, err = app.Append(0, lset, timestamp.FromTime(ts.Add(-ruleQueryOffset)), math.Float64frombits(value.StaleNaN))
				unwrappedErr := errors.Unwrap(err)
				if unwrappedErr == nil {
					unwrappedErr = err
				}
				switch {
				case unwrappedErr == nil:
				case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample),
					errors.Is(unwrappedErr, storage.ErrTooOldSample),
					errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
					// Do not count these in logging, as this is expected if series
					// is exposed from a different rule.
				default:
					logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err)
				}
			}
		}
	}

	var wg sync.WaitGroup
	for i, rule := range g.rules {
		select {
		case <-g.done:
			return
		default:
		}

		if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
@ -643,7 +665,6 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
eval(i, rule, nil)
}
}
wg.Wait()
g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load())
@ -874,6 +895,7 @@ type Metrics struct {
GroupInterval *prometheus.GaugeVec
GroupLastEvalTime *prometheus.GaugeVec
GroupLastDuration *prometheus.GaugeVec
GroupLastRuleDurationSum *prometheus.GaugeVec
GroupLastRestoreDuration *prometheus.GaugeVec
GroupRules *prometheus.GaugeVec
GroupSamples *prometheus.GaugeVec
@ -952,6 +974,14 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
},
[]string{"rule_group"},
),
GroupLastRuleDurationSum: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "rule_group_last_rule_duration_sum_seconds",
Help: "The sum of time in seconds it took to evaluate each rule in the group regardless of concurrency. This should be higher than the group duration if rules are evaluated concurrently.",
},
[]string{"rule_group"},
),
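		// Example use of the new gauge (assumption: the manager's standard
		// "prometheus" metric namespace): for a given rule_group,
		//   prometheus_rule_group_last_rule_duration_sum_seconds
		//     > prometheus_rule_group_last_duration_seconds
		// suggests concurrent rule evaluation is saving wall-clock time.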
GroupLastRestoreDuration: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
@ -989,6 +1019,7 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
m.GroupInterval,
m.GroupLastEvalTime,
m.GroupLastDuration,
m.GroupLastRuleDurationSum,
m.GroupLastRestoreDuration,
m.GroupRules,
m.GroupSamples,

View file

@ -82,6 +82,7 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.
timeSinceStart := time.Since(start)
g.metrics.IterationDuration.Observe(timeSinceStart.Seconds())
g.updateRuleEvaluationTimeSum()
g.setEvaluationTime(timeSinceStart)
g.setLastEvaluation(start)
g.setLastEvalTimestamp(evalTimestamp)
@ -453,6 +454,11 @@ type ruleDependencyController struct{}
// AnalyseRules implements RuleDependencyController.
func (c ruleDependencyController) AnalyseRules(rules []Rule) {
depMap := buildDependencyMap(rules)
if depMap == nil {
return
}
for _, r := range rules {
r.SetNoDependentRules(depMap.dependents(r) == 0)
r.SetNoDependencyRules(depMap.dependencies(r) == 0)

View file

@ -1985,7 +1985,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
require.Len(t, group.rules, ruleCount)
start := time.Now()
			DefaultEvalIterationFunc(ctx, group, start)
// Never expect more than 1 inflight query at a time.
require.EqualValues(t, 1, maxInflight.Load())
@ -1993,6 +1993,8 @@ func TestAsyncRuleEvaluation(t *testing.T) {
require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
// Each rule produces one vector.
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
// Group duration is higher than the sum of rule durations (group overhead).
require.GreaterOrEqual(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
}
})
@ -2023,7 +2025,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
require.Len(t, group.rules, ruleCount)
start := time.Now()
			DefaultEvalIterationFunc(ctx, group, start)
// Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
@ -2061,7 +2063,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
require.Len(t, group.rules, ruleCount)
start := time.Now()
			DefaultEvalIterationFunc(ctx, group, start)
// Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
@ -2100,7 +2102,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
start := time.Now()
			DefaultEvalIterationFunc(ctx, group, start)
// Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once.
require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals)
@ -2108,6 +2110,47 @@ func TestAsyncRuleEvaluation(t *testing.T) {
require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
// Each rule produces one vector.
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
// Group duration is less than the sum of rule durations
require.Less(t, group.GetEvaluationTime(), group.GetRuleEvaluationTimeSum())
}
})
t.Run("asynchronous evaluation of independent rules, with indeterminate. Should be synchronous", func(t *testing.T) {
t.Parallel()
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
inflightQueries := atomic.Int32{}
maxInflight := atomic.Int32{}
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
ruleCount := 7
opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
// Configure concurrency settings.
opts.ConcurrentEvalsEnabled = true
opts.MaxConcurrentEvals = int64(ruleCount) * 2
opts.RuleConcurrencyController = nil
ruleManager := NewManager(opts)
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_indeterminates.yaml"}...)
require.Empty(t, errs)
require.Len(t, groups, 1)
for _, group := range groups {
require.Len(t, group.rules, ruleCount)
start := time.Now()
group.Eval(ctx, start)
// Never expect more than 1 inflight query at a time.
require.EqualValues(t, 1, maxInflight.Load())
// Each rule should take at least 1 second to execute sequentially.
require.GreaterOrEqual(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
// Each rule produces one vector.
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
}
})
}
@ -2222,3 +2265,132 @@ func TestLabels_FromMaps(t *testing.T) {
require.Equal(t, expected, mLabels, "unexpected labelset")
}
func TestRuleDependencyController_AnalyseRules(t *testing.T) {
type expectedDependencies struct {
noDependentRules bool
noDependencyRules bool
}
testCases := []struct {
name string
ruleFile string
expected map[string]expectedDependencies
}{
{
name: "all independent rules",
ruleFile: "fixtures/rules_multiple_independent.yaml",
expected: map[string]expectedDependencies{
"job:http_requests:rate1m": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate5m": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate15m": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate30m": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate1h": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate2h": {
noDependentRules: true,
noDependencyRules: true,
},
},
},
{
name: "some dependent rules",
ruleFile: "fixtures/rules_multiple.yaml",
expected: map[string]expectedDependencies{
"job:http_requests:rate1m": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate5m": {
noDependentRules: true,
noDependencyRules: true,
},
"job:http_requests:rate15m": {
noDependentRules: false,
noDependencyRules: true,
},
"TooManyRequests": {
noDependentRules: true,
noDependencyRules: false,
},
},
},
{
name: "indeterminate rules",
ruleFile: "fixtures/rules_indeterminates.yaml",
expected: map[string]expectedDependencies{
"job:http_requests:rate1m": {
noDependentRules: false,
noDependencyRules: false,
},
"job:http_requests:rate5m": {
noDependentRules: false,
noDependencyRules: false,
},
"job:http_requests:rate15m": {
noDependentRules: false,
noDependencyRules: false,
},
"job:http_requests:rate30m": {
noDependentRules: false,
noDependencyRules: false,
},
"job:http_requests:rate1h": {
noDependentRules: false,
noDependencyRules: false,
},
"job:http_requests:rate2h": {
noDependentRules: false,
noDependencyRules: false,
},
"matcher": {
noDependentRules: false,
noDependencyRules: false,
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
storage := teststorage.New(t)
t.Cleanup(func() { storage.Close() })
ruleManager := NewManager(&ManagerOptions{
Context: context.Background(),
Logger: promslog.NewNopLogger(),
Appendable: storage,
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
})
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, tc.ruleFile)
require.Empty(t, errs)
require.Len(t, groups, 1)
for _, g := range groups {
ruleManager.opts.RuleDependencyController.AnalyseRules(g.rules)
for _, r := range g.rules {
exp, ok := tc.expected[r.Name()]
require.Truef(t, ok, "rule not found in expected: %s", r.String())
require.Equalf(t, exp.noDependentRules, r.NoDependentRules(), "rule: %s", r.String())
require.Equalf(t, exp.noDependencyRules, r.NoDependencyRules(), "rule: %s", r.String())
}
}
})
}
}

View file

@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"net/http"
"net/http/httptest"
"net/url"
@ -38,6 +39,8 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/config"
@ -59,7 +62,7 @@ func init() {
func TestPopulateLabels(t *testing.T) {
cases := []struct {
		in      model.LabelSet
cfg *config.ScrapeConfig
res labels.Labels
resOrig labels.Labels
@ -67,10 +70,10 @@ func TestPopulateLabels(t *testing.T) {
}{
// Regular population of scrape config options.
{
			in: model.LabelSet{
				model.AddressLabel: "1.2.3.4:1000",
				"custom":           "value",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -101,14 +104,14 @@ func TestPopulateLabels(t *testing.T) {
// Pre-define/overwrite scrape config labels.
// Leave out port and expect it to be defaulted to scheme.
{
			in: model.LabelSet{
				model.AddressLabel:        "1.2.3.4",
				model.SchemeLabel:         "http",
				model.MetricsPathLabel:    "/custom",
				model.JobLabel:            "custom-job",
				model.ScrapeIntervalLabel: "2s",
				model.ScrapeTimeoutLabel:  "2s",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -136,10 +139,10 @@ func TestPopulateLabels(t *testing.T) {
},
// Provide instance label. HTTPS port default for IPv6.
{
			in: model.LabelSet{
				model.AddressLabel:  "[::1]",
				model.InstanceLabel: "custom-instance",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -168,7 +171,7 @@ func TestPopulateLabels(t *testing.T) {
},
// Address label missing.
{
in: labels.FromStrings("custom", "value"),
in: model.LabelSet{"custom": "value"},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -182,7 +185,7 @@ func TestPopulateLabels(t *testing.T) {
},
// Address label missing, but added in relabelling.
{
in: labels.FromStrings("custom", "host:1234"),
in: model.LabelSet{"custom": "host:1234"},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -220,7 +223,7 @@ func TestPopulateLabels(t *testing.T) {
},
// Address label missing, but added in relabelling.
{
in: labels.FromStrings("custom", "host:1234"),
in: model.LabelSet{"custom": "host:1234"},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -258,10 +261,10 @@ func TestPopulateLabels(t *testing.T) {
},
// Invalid UTF-8 in label.
{
			in: model.LabelSet{
				model.AddressLabel: "1.2.3.4:1000",
				"custom":           "\xbd",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -275,10 +278,10 @@ func TestPopulateLabels(t *testing.T) {
},
// Invalid duration in interval label.
{
			in: model.LabelSet{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "2notseconds",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -292,10 +295,10 @@ func TestPopulateLabels(t *testing.T) {
},
// Invalid duration in timeout label.
{
			in: model.LabelSet{
				model.AddressLabel:       "1.2.3.4:1000",
				model.ScrapeTimeoutLabel: "2notseconds",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -309,10 +312,10 @@ func TestPopulateLabels(t *testing.T) {
},
// 0 interval in timeout label.
{
			in: model.LabelSet{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "0s",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -326,10 +329,10 @@ func TestPopulateLabels(t *testing.T) {
},
// 0 duration in timeout label.
{
			in: model.LabelSet{
				model.AddressLabel:       "1.2.3.4:1000",
				model.ScrapeTimeoutLabel: "0s",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -343,11 +346,11 @@ func TestPopulateLabels(t *testing.T) {
},
// Timeout less than interval.
{
			in: model.LabelSet{
				model.AddressLabel:        "1.2.3.4:1000",
				model.ScrapeIntervalLabel: "1s",
				model.ScrapeTimeoutLabel:  "2s",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -361,9 +364,9 @@ func TestPopulateLabels(t *testing.T) {
},
// Don't attach default port.
{
			in: model.LabelSet{
				model.AddressLabel: "1.2.3.4",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -391,9 +394,9 @@ func TestPopulateLabels(t *testing.T) {
},
// verify that the default port is not removed (http).
{
			in: model.LabelSet{
				model.AddressLabel: "1.2.3.4:80",
			},
cfg: &config.ScrapeConfig{
Scheme: "http",
MetricsPath: "/metrics",
@ -421,9 +424,9 @@ func TestPopulateLabels(t *testing.T) {
},
// verify that the default port is not removed (https).
{
			in: model.LabelSet{
				model.AddressLabel: "1.2.3.4:443",
			},
cfg: &config.ScrapeConfig{
Scheme: "https",
MetricsPath: "/metrics",
@ -451,27 +454,26 @@ func TestPopulateLabels(t *testing.T) {
},
}
for _, c := range cases {
		in := maps.Clone(c.in)
		lb := labels.NewBuilder(labels.EmptyLabels())
		res, err := PopulateLabels(lb, c.cfg, c.in, nil)
		if c.err != "" {
			require.EqualError(t, err, c.err)
		} else {
			require.NoError(t, err)
			testutil.RequireEqual(t, c.res, res)
			PopulateDiscoveredLabels(lb, c.cfg, c.in, nil)
			testutil.RequireEqual(t, c.resOrig, lb.Labels())
		}
		require.Equal(t, c.in, in) // Check this wasn't altered by PopulateLabels().
}
}
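
For reference, a condensed sketch of the reworked call sequence this test exercises; populateBoth is a hypothetical helper, not part of the change:

	// populateBoth derives both label views from the same inputs. Discovered
	// labels are no longer stored per target; they are rebuilt on demand.
	func populateBoth(cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) (res, orig labels.Labels, err error) {
		lb := labels.NewBuilder(labels.EmptyLabels())
		// Final labels after relabeling; empty res with nil err means dropped.
		if res, err = PopulateLabels(lb, cfg, tLabels, tgLabels); err != nil {
			return res, orig, err
		}
		// Pre-relabeling ("discovered") view, e.g. for the SD web page.
		PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels)
		return res, lb.Labels(), nil
	}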
func loadConfiguration(t testing.TB, c string) *config.Config {
t.Helper()
	cfg, err := config.Load(c, promslog.NewNopLogger())
	require.NoError(t, err)
return cfg
}
@ -724,33 +726,6 @@ scrape_configs:
require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools())
}
func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion bool) (*collectResultAppender, *Manager) {
app := &collectResultAppender{}
scrapeManager, err := NewManager(
&Options{
EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion,
skipOffsetting: true,
},
promslog.New(&promslog.Config{}),
nil,
&collectResultAppendable{app},
prometheus.NewRegistry(),
)
require.NoError(t, err)
require.NoError(t, scrapeManager.ApplyConfig(&config.Config{
GlobalConfig: config.GlobalConfig{
// Disable regular scrapes.
ScrapeInterval: model.Duration(9999 * time.Minute),
ScrapeTimeout: model.Duration(5 * time.Second),
ScrapeProtocols: []config.ScrapeProtocol{config.OpenMetricsText1_0_0, config.PrometheusProto},
},
ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test", HonorTimestamps: honorTimestamps}},
}))
return app, scrapeManager
}
func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server {
once := sync.Once{}
@ -789,6 +764,9 @@ func TestManagerCTZeroIngestion(t *testing.T) {
t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) {
for _, testCTZeroIngest := range []bool{false, true} {
t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sampleTs := time.Now()
ctTs := time.Time{}
if testWithCT {
@ -797,10 +775,45 @@ func TestManagerCTZeroIngestion(t *testing.T) {
// TODO(bwplotka): Add more types than just counter?
encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs)
app := &collectResultAppender{}
discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
EnableCreatedTimestampZeroIngestion: testCTZeroIngest,
skipOffsetting: true,
}, &collectResultAppendable{app})
defer scrapeManager.Stop()
server := setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded)
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
testConfig := fmt.Sprintf(`
global:
# Disable regular scrapes.
scrape_interval: 9999m
scrape_timeout: 5s
scrape_configs:
- job_name: test
honor_timestamps: true
static_configs:
- targets: ['%s']
`, serverURL.Host)
applyConfig(t, testConfig, scrapeManager, discoveryManager)
// Wait for one scrape.
ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
app.mtx.Lock()
defer app.mtx.Unlock()
// Check if scrape happened and grab the relevant samples.
if len(app.resultFloats) > 0 {
return nil
}
return errors.New("expected some float samples, got none")
}), "after 1 minute")
					// Verify what we got vs expectations around CT injection.
@ -825,7 +838,7 @@ func TestManagerCTZeroIngestion(t *testing.T) {
require.Len(t, createdSeriesSamples, 1)
// Conversion taken from common/expfmt.writeOpenMetricsFloat.
// We don't check the ct timestamp as explicit ts was not implemented in expfmt.Encoder,
						// but exists in OM https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created
// We can implement this, but we want to potentially get rid of OM 1.0 CT lines
require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f)
} else {
@ -871,39 +884,6 @@ func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName
}
}
func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender, server *httptest.Server) {
t.Helper()
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
// Add fake target directly into tsets + reload
manager.updateTsets(map[string][]*targetgroup.Group{
"test": {{
Targets: []model.LabelSet{{
model.SchemeLabel: model.LabelValue(serverURL.Scheme),
model.AddressLabel: model.LabelValue(serverURL.Host),
}},
}},
})
manager.reload()
// Wait for one scrape.
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
appender.mtx.Lock()
defer appender.mtx.Unlock()
// Check if scrape happened and grab the relevant samples.
if len(appender.resultFloats) > 0 {
return nil
}
return errors.New("expected some float samples, got none")
}), "after 1 minute")
manager.Stop()
}
func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) {
for _, f := range floats {
if f.metric.Get(model.MetricNameLabel) == metricName {
@ -916,7 +896,7 @@ func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatS
// generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram,
// but in the form of dto.Histogram.
func generateTestHistogram(i int) *dto.Histogram {
	helper := tsdbutil.GenerateTestHistogram(int64(i))
h := &dto.Histogram{}
h.SampleCount = proto.Uint64(helper.Count)
h.SampleSum = proto.Float64(helper.Sum)
@ -978,37 +958,22 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			app := &collectResultAppender{}
			discoveryManager, scrapeManager := runManagers(t, ctx, &Options{
				EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion,
				EnableNativeHistogramsIngestion:     true,
				skipOffsetting:                      true,
			}, &collectResultAppendable{app})
			defer scrapeManager.Stop()
once := sync.Once{}
			// Start a fake HTTP target that allows one scrape only.
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
					fail := true
once.Do(func() {
fail = false
w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
@ -1031,22 +996,23 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
testConfig := fmt.Sprintf(`
global:
# Disable regular scrapes.
scrape_interval: 9999m
scrape_timeout: 5s
scrape_configs:
- job_name: test
static_configs:
- targets: ['%s']
`, serverURL.Host)
applyConfig(t, testConfig, scrapeManager, discoveryManager)
var got []histogramSample
// Wait for one scrape.
			ctx, cancel = context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
app.mtx.Lock()
@ -1064,7 +1030,6 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
}
return errors.New("expected some histogram samples, got none")
}), "after 1 minute")
scrapeManager.Stop()
// Check for zero samples, assuming we only injected always one histogram sample.
// Did it contain CT to inject? If yes, was CT zero enabled?
@ -1118,9 +1083,17 @@ func applyConfig(
require.NoError(t, discoveryManager.ApplyConfig(c))
}
func runManagers(t *testing.T, ctx context.Context, opts *Options, app storage.Appendable) (*discovery.Manager, *Manager) {
t.Helper()
if opts == nil {
opts = &Options{}
}
opts.DiscoveryReloadInterval = model.Duration(100 * time.Millisecond)
if app == nil {
app = nopAppendable{}
}
reg := prometheus.NewRegistry()
sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
require.NoError(t, err)
@ -1132,10 +1105,10 @@ func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manage
discovery.Updatert(100*time.Millisecond),
)
scrapeManager, err := NewManager(
		opts,
nil,
nil,
		app,
prometheus.NewRegistry(),
)
require.NoError(t, err)
@ -1213,7 +1186,7 @@ scrape_configs:
- files: ['%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
defer scrapeManager.Stop()
applyConfig(
@ -1312,7 +1285,7 @@ scrape_configs:
file_sd_configs:
- files: ['%s', '%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
defer scrapeManager.Stop()
applyConfig(
@ -1372,7 +1345,7 @@ scrape_configs:
file_sd_configs:
- files: ['%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
defer scrapeManager.Stop()
applyConfig(
@ -1439,7 +1412,7 @@ scrape_configs:
- targets: ['%s']
`
	discoveryManager, scrapeManager := runManagers(t, ctx, nil, nil)
defer scrapeManager.Stop()
// Apply the initial config with an existing file

View file

@ -361,6 +361,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) {
bodySizeLimit: bodySizeLimit,
acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme),
acceptEncodingHeader: acceptEncodingHeader(enableCompression),
metrics: sp.metrics,
}
newLoop = sp.newLoop(scrapeLoopOptions{
target: t,
@ -449,7 +450,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
switch {
case nonEmpty:
all = append(all, t)
		default:
if sp.config.KeepDroppedTargets == 0 || uint(len(sp.droppedTargets)) < sp.config.KeepDroppedTargets {
sp.droppedTargets = append(sp.droppedTargets, t)
}
@ -552,9 +553,9 @@ func (sp *scrapePool) sync(targets []*Target) {
if _, ok := uniqueLoops[hash]; !ok {
uniqueLoops[hash] = nil
}
			// Need to keep the most updated ScrapeConfig for
			// displaying labels in the Service Discovery web page.
			sp.activeTargets[hash].SetScrapeConfig(sp.config, t.tLabels, t.tgLabels)
}
}

View file

@ -212,7 +212,8 @@ func TestDroppedTargetsList(t *testing.T) {
sp.Sync(tgs)
require.Len(t, sp.droppedTargets, expectedLength)
require.Equal(t, expectedLength, sp.droppedTargetsCount)
	lb := labels.NewBuilder(labels.EmptyLabels())
	require.Equal(t, expectedLabelSetString, sp.droppedTargets[0].DiscoveredLabels(lb).String())
// Check that count is still correct when we don't retain all dropped targets.
sp.config.KeepDroppedTargets = 1
@ -235,16 +236,19 @@ func TestDiscoveredLabelsUpdate(t *testing.T) {
}
sp.activeTargets = make(map[uint64]*Target)
t1 := &Target{
discoveredLabels: labels.FromStrings("label", "name"),
tLabels: model.LabelSet{"label": "name"},
scrapeConfig: sp.config,
}
sp.activeTargets[t1.hash()] = t1
t2 := &Target{
discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
tLabels: model.LabelSet{"labelNew": "nameNew"},
scrapeConfig: sp.config,
}
sp.sync([]*Target{t2})
	lb := labels.NewBuilder(labels.EmptyLabels())
	require.Equal(t, t2.DiscoveredLabels(lb), sp.activeTargets[t1.hash()].DiscoveredLabels(lb))
}
type testLoop struct {
@ -309,7 +313,8 @@ func TestScrapePoolStop(t *testing.T) {
for i := 0; i < numTargets; i++ {
t := &Target{
			labels:       labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i)),
			scrapeConfig: &config.ScrapeConfig{},
}
l := &testLoop{}
d := time.Duration((i+1)*20) * time.Millisecond
@ -394,8 +399,8 @@ func TestScrapePoolReload(t *testing.T) {
for i := 0; i < numTargets; i++ {
labels := labels.FromStrings(model.AddressLabel, fmt.Sprintf("example.com:%d", i))
t := &Target{
			labels:       labels,
			scrapeConfig: &config.ScrapeConfig{},
}
l := &testLoop{}
d := time.Duration((i+1)*20) * time.Millisecond
@ -2689,6 +2694,7 @@ func TestTargetScraperScrapeOK(t *testing.T) {
model.SchemeLabel, serverURL.Scheme,
model.AddressLabel, serverURL.Host,
),
scrapeConfig: &config.ScrapeConfig{},
},
client: http.DefaultClient,
timeout: configTimeout,
@ -2739,6 +2745,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) {
model.SchemeLabel, serverURL.Scheme,
model.AddressLabel, serverURL.Host,
),
scrapeConfig: &config.ScrapeConfig{},
},
client: http.DefaultClient,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
@ -2794,6 +2801,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
model.SchemeLabel, serverURL.Scheme,
model.AddressLabel, serverURL.Host,
),
scrapeConfig: &config.ScrapeConfig{},
},
client: http.DefaultClient,
acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation),
@ -2837,6 +2845,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) {
model.SchemeLabel, serverURL.Scheme,
model.AddressLabel, serverURL.Host,
),
scrapeConfig: &config.ScrapeConfig{},
},
client: http.DefaultClient,
bodySizeLimit: bodySizeLimit,
@ -3107,7 +3116,8 @@ func TestReuseScrapeCache(t *testing.T) {
}
sp, _ = newScrapePool(cfg, app, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
t1 = &Target{
discoveredLabels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
labels: labels.FromStrings("labelNew", "nameNew", "labelNew1", "nameNew1", "labelNew2", "nameNew2"),
scrapeConfig: &config.ScrapeConfig{},
}
proxyURL, _ = url.Parse("http://localhost:2128")
)
@ -3291,7 +3301,8 @@ func TestReuseCacheRace(t *testing.T) {
buffers = pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) })
sp, _ = newScrapePool(cfg, app, 0, nil, buffers, &Options{}, newTestScrapeMetrics(t))
t1 = &Target{
discoveredLabels: labels.FromStrings("labelNew", "nameNew"),
labels: labels.FromStrings("labelNew", "nameNew"),
scrapeConfig: &config.ScrapeConfig{},
}
)
defer sp.stop()
@ -4475,7 +4486,9 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
model.SchemeLabel, serverURL.Scheme,
model.AddressLabel, serverURL.Host,
),
				scrapeConfig: &config.ScrapeConfig{
					Params: url.Values{"count": []string{strconv.Itoa(scenario.metricsCount)}},
				},
},
client: client,
timeout: time.Second,
@ -4831,3 +4844,44 @@ func newScrapableServer(scrapeText string) (s *httptest.Server, scrapedTwice cha
}
})), scrapedTwice
}
// Regression test for the panic fixed in https://github.com/prometheus/prometheus/pull/15523.
func TestScrapePoolScrapeAfterReload(t *testing.T) {
h := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte{0x42, 0x42})
},
))
t.Cleanup(h.Close)
cfg := &config.ScrapeConfig{
BodySizeLimit: 1,
JobName: "test",
Scheme: "http",
ScrapeInterval: model.Duration(100 * time.Millisecond),
ScrapeTimeout: model.Duration(100 * time.Millisecond),
EnableCompression: false,
ServiceDiscoveryConfigs: discovery.Configs{
&discovery.StaticConfig{
{
Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(h.URL)}},
},
},
},
}
p, err := newScrapePool(cfg, &nopAppendable{}, 0, nil, nil, &Options{}, newTestScrapeMetrics(t))
require.NoError(t, err)
t.Cleanup(p.stop)
p.Sync([]*targetgroup.Group{
{
Targets: []model.LabelSet{{model.AddressLabel: model.LabelValue(strings.TrimPrefix(h.URL, "http://"))}},
Source: "test",
},
})
require.NoError(t, p.reload(cfg))
<-time.After(1 * time.Second)
}

View file

@ -45,12 +45,12 @@ const (
// Target refers to a singular HTTP or HTTPS endpoint.
type Target struct {
	// Any labels that are added to this target and its metrics.
	labels labels.Labels
	// ScrapeConfig used to create this target.
	scrapeConfig *config.ScrapeConfig
	// Target and TargetGroup labels used to create this target.
	tLabels, tgLabels model.LabelSet
mtx sync.RWMutex
lastError error
@ -61,12 +61,13 @@ type Target struct {
}
// NewTarget creates a reasonably configured target for querying.
func NewTarget(labels labels.Labels, scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) *Target {
	return &Target{
		labels:       labels,
		tLabels:      tLabels,
		tgLabels:     tgLabels,
		scrapeConfig: scrapeConfig,
		health:       HealthUnknown,
	}
}
@ -168,11 +169,11 @@ func (t *Target) offset(interval time.Duration, offsetSeed uint64) time.Duration
}
// Labels returns a copy of the set of all public labels of the target.
func (t *Target) Labels(b *labels.ScratchBuilder) labels.Labels {
b.Reset()
func (t *Target) Labels(b *labels.Builder) labels.Labels {
b.Reset(labels.EmptyLabels())
t.labels.Range(func(l labels.Label) {
if !strings.HasPrefix(l.Name, model.ReservedLabelPrefix) {
b.Add(l.Name, l.Value)
b.Set(l.Name, l.Value)
}
})
return b.Labels()
@ -188,24 +189,31 @@ func (t *Target) LabelsRange(f func(l labels.Label)) {
}
// DiscoveredLabels returns a copy of the target's labels before any processing.
func (t *Target) DiscoveredLabels() labels.Labels {
func (t *Target) DiscoveredLabels(lb *labels.Builder) labels.Labels {
t.mtx.Lock()
defer t.mtx.Unlock()
return t.discoveredLabels.Copy()
cfg, tLabels, tgLabels := t.scrapeConfig, t.tLabels, t.tgLabels
t.mtx.Unlock()
PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels)
return lb.Labels()
}
// SetDiscoveredLabels sets new DiscoveredLabels.
func (t *Target) SetDiscoveredLabels(l labels.Labels) {
// SetScrapeConfig sets new ScrapeConfig.
func (t *Target) SetScrapeConfig(scrapeConfig *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) {
t.mtx.Lock()
defer t.mtx.Unlock()
t.discoveredLabels = l
t.scrapeConfig = scrapeConfig
t.tLabels = tLabels
t.tgLabels = tgLabels
}
// URL returns a copy of the target's URL.
func (t *Target) URL() *url.URL {
t.mtx.Lock()
configParams := t.scrapeConfig.Params
t.mtx.Unlock()
params := url.Values{}
for k, v := range t.params {
for k, v := range configParams {
params[k] = make([]string, len(v))
copy(params[k], v)
}
@ -420,10 +428,19 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels
return ref, nil
}
// PopulateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value.
// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
// PopulateDiscoveredLabels sets base labels on lb from target and group labels and scrape configuration, before relabeling.
func PopulateDiscoveredLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) {
lb.Reset(labels.EmptyLabels())
for ln, lv := range tLabels {
lb.Set(string(ln), string(lv))
}
for ln, lv := range tgLabels {
if _, ok := tLabels[ln]; !ok {
lb.Set(string(ln), string(lv))
}
}
// Copy labels into the labelset for the target if they are not set already.
scrapeLabels := []labels.Label{
{Name: model.JobLabel, Value: cfg.JobName},
@ -444,44 +461,49 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab
lb.Set(name, v[0])
}
}
}
preRelabelLabels := lb.Labels()
// PopulateLabels builds labels from target and group labels and scrape configuration,
// performs defined relabeling, checks validity, and adds Prometheus standard labels such as 'instance'.
// A return of empty labels and nil error means the target was dropped by relabeling.
func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, tLabels, tgLabels model.LabelSet) (res labels.Labels, err error) {
PopulateDiscoveredLabels(lb, cfg, tLabels, tgLabels)
keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...)
// Check if the target was dropped.
if !keep {
return labels.EmptyLabels(), preRelabelLabels, nil
return labels.EmptyLabels(), nil
}
if v := lb.Get(model.AddressLabel); v == "" {
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
return labels.EmptyLabels(), errors.New("no address")
}
addr := lb.Get(model.AddressLabel)
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), err
return labels.EmptyLabels(), err
}
interval := lb.Get(model.ScrapeIntervalLabel)
intervalDuration, err := model.ParseDuration(interval)
if err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err)
return labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err)
}
if time.Duration(intervalDuration) == 0 {
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0")
return labels.EmptyLabels(), errors.New("scrape interval cannot be 0")
}
timeout := lb.Get(model.ScrapeTimeoutLabel)
timeoutDuration, err := model.ParseDuration(timeout)
if err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err)
return labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err)
}
if time.Duration(timeoutDuration) == 0 {
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0")
return labels.EmptyLabels(), errors.New("scrape timeout cannot be 0")
}
if timeoutDuration > intervalDuration {
return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
return labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval)
}
// Meta labels are deleted after relabelling. Other internal labels propagate to
@ -506,9 +528,9 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig lab
return nil
})
if err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), err
return labels.EmptyLabels(), err
}
return res, preRelabelLabels, nil
return res, nil
}
// TargetsFromGroup builds targets based on the given TargetGroup and config.
@ -516,24 +538,12 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets [
targets = targets[:0]
failures := []error{}
for i, tlset := range tg.Targets {
lb.Reset(labels.EmptyLabels())
for ln, lv := range tlset {
lb.Set(string(ln), string(lv))
}
for ln, lv := range tg.Labels {
if _, ok := tlset[ln]; !ok {
lb.Set(string(ln), string(lv))
}
}
lset, origLabels, err := PopulateLabels(lb, cfg)
for i, tLabels := range tg.Targets {
lset, err := PopulateLabels(lb, cfg, tLabels, tg.Labels)
if err != nil {
failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err))
}
if !lset.IsEmpty() || !origLabels.IsEmpty() {
targets = append(targets, NewTarget(lset, origLabels, cfg.Params))
} else {
targets = append(targets, NewTarget(lset, cfg, tLabels, tg.Labels))
}
}
return targets, failures
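
For orientation, a minimal sketch of how the reworked target-construction API fits together. The scrape config and group are illustrative (modeled on the test fixture earlier in this diff), imports and error handling are elided, and the names assume the package-level declarations shown above:

```go
// Build targets for a static group with the new signatures (package scrape).
lb := labels.NewBuilder(labels.EmptyLabels())
cfg := &config.ScrapeConfig{
	JobName:        "node",
	Scheme:         "http",
	MetricsPath:    "/metrics",
	ScrapeInterval: model.Duration(15 * time.Second),
	ScrapeTimeout:  model.Duration(10 * time.Second),
}
group := &targetgroup.Group{
	Targets: []model.LabelSet{{model.AddressLabel: "localhost:9100"}},
	Labels:  model.LabelSet{"env": "prod"},
}
targets, _ := TargetsFromGroup(group, cfg, nil, lb)
for _, t := range targets {
	// The pre-relabel set is no longer stored per target; DiscoveredLabels
	// rebuilds it on demand from scrapeConfig, tLabels and tgLabels.
	_ = t.DiscoveredLabels(lb)
}
```

A likely motivation for the restructuring: dropped targets no longer pin a full copy of their discovered label set in memory, only the config pointer and the raw target/group label sets.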

View file

@ -43,8 +43,8 @@ const (
func TestTargetLabels(t *testing.T) {
target := newTestTarget("example.com:80", 0, labels.FromStrings("job", "some_job", "foo", "bar"))
want := labels.FromStrings(model.JobLabel, "some_job", "foo", "bar")
b := labels.NewScratchBuilder(0)
got := target.Labels(&b)
b := labels.NewBuilder(labels.EmptyLabels())
got := target.Labels(b)
require.Equal(t, want, got)
i := 0
target.LabelsRange(func(l labels.Label) {
@ -103,9 +103,11 @@ func TestTargetOffset(t *testing.T) {
}
func TestTargetURL(t *testing.T) {
params := url.Values{
"abc": []string{"foo", "bar", "baz"},
"xyz": []string{"hoo"},
scrapeConfig := &config.ScrapeConfig{
Params: url.Values{
"abc": []string{"foo", "bar", "baz"},
"xyz": []string{"hoo"},
},
}
labels := labels.FromMap(map[string]string{
model.AddressLabel: "example.com:1234",
@ -114,7 +116,7 @@ func TestTargetURL(t *testing.T) {
"__param_abc": "overwrite",
"__param_cde": "huu",
})
target := NewTarget(labels, labels, params)
target := NewTarget(labels, scrapeConfig, nil, nil)
// The reserved labels are concatenated into a full URL. The first value for each
// URL query parameter can be set/modified via labels as well.
@ -139,7 +141,7 @@ func newTestTarget(targetURL string, _ time.Duration, lbls labels.Labels) *Targe
lb.Set(model.AddressLabel, strings.TrimPrefix(targetURL, "http://"))
lb.Set(model.MetricsPathLabel, "/metrics")
return &Target{labels: lb.Labels()}
return &Target{labels: lb.Labels(), scrapeConfig: &config.ScrapeConfig{}}
}
func TestNewHTTPBearerToken(t *testing.T) {

View file

@ -26,7 +26,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install Go
uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0
with:
go-version: 1.23.x
- name: Install snmp_exporter/generator dependencies

View file

@ -385,13 +385,13 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
}
func histogramSample(ts int64, hint histogram.CounterResetHint) hSample {
h := tsdbutil.GenerateTestHistogram(int(ts + 1))
h := tsdbutil.GenerateTestHistogram(ts + 1)
h.CounterResetHint = hint
return hSample{t: ts, h: h}
}
func floatHistogramSample(ts int64, hint histogram.CounterResetHint) fhSample {
fh := tsdbutil.GenerateTestFloatHistogram(int(ts + 1))
fh := tsdbutil.GenerateTestFloatHistogram(ts + 1)
fh.CounterResetHint = hint
return fhSample{t: ts, fh: fh}
}

View file

@ -104,9 +104,10 @@ var (
HelpRef: 15, // Symbolized writeV2RequestSeries1Metadata.Help.
UnitRef: 16, // Symbolized writeV2RequestSeries1Metadata.Unit.
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
Samples: []writev2.Sample{{Value: 1, Timestamp: 10}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{11, 12}, Value: 1, Timestamp: 10}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(10, &testHistogram), writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil))},
CreatedTimestamp: 1, // CT needs to be lower than the sample's timestamp.
},
{
LabelsRefs: []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, // Same series as first.
@ -116,9 +117,9 @@ var (
HelpRef: 17, // Symbolized writeV2RequestSeries2Metadata.Help.
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
Samples: []writev2.Sample{{Value: 2, Timestamp: 20}},
Exemplars: []writev2.Exemplar{{LabelsRefs: []uint32{13, 14}, Value: 2, Timestamp: 20}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(30, &testHistogram), writev2.FromFloatHistogram(40, testHistogram.ToFloat(nil))},
},
},
}
@ -140,9 +141,10 @@ func TestWriteV2RequestFixture(t *testing.T) {
HelpRef: st.Symbolize(writeV2RequestSeries1Metadata.Help),
UnitRef: st.Symbolize(writeV2RequestSeries1Metadata.Unit),
},
Samples: []writev2.Sample{{Value: 1, Timestamp: 1}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 1}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(1, &testHistogram), writev2.FromFloatHistogram(2, testHistogram.ToFloat(nil))},
Samples: []writev2.Sample{{Value: 1, Timestamp: 10}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar1LabelRefs, Value: 1, Timestamp: 10}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(10, &testHistogram), writev2.FromFloatHistogram(20, testHistogram.ToFloat(nil))},
CreatedTimestamp: 1,
},
{
LabelsRefs: labelRefs,
@ -151,9 +153,9 @@ func TestWriteV2RequestFixture(t *testing.T) {
HelpRef: st.Symbolize(writeV2RequestSeries2Metadata.Help),
// No unit.
},
Samples: []writev2.Sample{{Value: 2, Timestamp: 2}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 2}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(3, &testHistogram), writev2.FromFloatHistogram(4, testHistogram.ToFloat(nil))},
Samples: []writev2.Sample{{Value: 2, Timestamp: 20}},
Exemplars: []writev2.Exemplar{{LabelsRefs: exemplar2LabelRefs, Value: 2, Timestamp: 20}},
Histograms: []writev2.Histogram{writev2.FromIntHistogram(30, &testHistogram), writev2.FromFloatHistogram(40, testHistogram.ToFloat(nil))},
},
},
Symbols: st.Symbols(),
@ -163,6 +165,11 @@ func TestWriteV2RequestFixture(t *testing.T) {
}
func TestValidateLabelsAndMetricName(t *testing.T) {
oldScheme := model.NameValidationScheme
model.NameValidationScheme = model.LegacyValidation
defer func() {
model.NameValidationScheme = oldScheme
}()
tests := []struct {
input []prompb.Label
expectedErr string

View file

@ -22,7 +22,6 @@ import (
"strings"
"unicode"
"github.com/prometheus/prometheus/util/strutil"
"go.opentelemetry.io/collector/pdata/pmetric"
)
@ -30,7 +29,7 @@ import (
// OTLP metrics use the c/s notation as specified at https://ucum.org/ucum.html
// (See also https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/semantic_conventions/README.md#instrument-units)
// Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units
// OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units
// OpenMetrics specification for units: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#units-and-base-units
var unitMap = map[string]string{
// Time
"d": "days",
@ -96,9 +95,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
var metricName string
if !allowUTF8 {
// Regexp for metric name characters that should be replaced with _.
invalidMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:_]`)
// Simple case (no full normalization, no units, etc.).
metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool {
return invalidMetricCharRE.MatchString(string(r))
@ -120,19 +116,30 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix
return metricName
}
var (
nonMetricNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9:]`)
// Regexp for metric name characters that should be replaced with _.
invalidMetricCharRE = regexp.MustCompile(`[^a-zA-Z0-9:_]`)
multipleUnderscoresRE = regexp.MustCompile(`__+`)
)
// Build a normalized name for the specified metric.
func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string {
var translationFunc func(rune) bool
var nameTokens []string
var separators []string
if !allowUTF8 {
nonTokenMetricCharRE := regexp.MustCompile(`[^a-zA-Z0-9:]`)
translationFunc = func(r rune) bool { return nonTokenMetricCharRE.MatchString(string(r)) }
// Split metric name into "tokens" (of supported metric name runes).
// Note that this has the side effect of replacing multiple consecutive underscores with a single underscore.
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
nameTokens = strings.FieldsFunc(
metric.Name(),
func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) },
)
} else {
translationFunc = func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' }
translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' }
// Split metric name into "tokens" (of supported metric name runes).
nameTokens, separators = fieldsFunc(metric.Name(), translationFunc)
}
// Split metric name into "tokens" (of supported metric name runes).
// Note that this has the side effect of replacing multiple consecutive underscores with a single underscore.
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
nameTokens, separators := fieldsFunc(metric.Name(), translationFunc)
// Split unit at the '/' if any
unitTokens := strings.SplitN(metric.Unit(), "/", 2)
@ -201,12 +208,14 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri
nameTokens = append([]string{namespace}, nameTokens...)
}
// Build the string from the tokens + separators.
// If UTF-8 isn't allowed, we'll use underscores as separators.
var normalizedName string
if !allowUTF8 {
separators = []string{}
// Build the string from the tokens, separated with underscores
normalizedName = strings.Join(nameTokens, "_")
} else {
// Build the string from the tokens + separators.
normalizedName = join(nameTokens, separators, "_")
}
normalizedName := join(nameTokens, separators, "_")
// Metric name cannot start with a digit, so prefix it with "_" in this case
if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) {
@ -216,73 +225,12 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri
return normalizedName
}
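
To make the two joining branches above concrete, here is roughly what they produce for a simple metric, using the createGauge helper from the companion test file. The expected strings are illustrative, inferred from the unit map earlier in this file ("By" maps to "bytes") and the tests below:

```go
// allowUTF8=false: "system.io" tokenizes on non-[a-zA-Z0-9:] runes into
// ["system", "io"], the unit suffix is appended, and tokens are joined
// with underscores.
normalizeName(createGauge("system.io", "By"), "", false) // "system_io_bytes"

// allowUTF8=true: fieldsFunc keeps the original separators, so the dot
// survives and only the appended tokens get underscore separators.
normalizeName(createGauge("system.io", "By"), "", true) // "system.io_bytes"
```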
// TrimPromSuffixes trims type and unit prometheus suffixes from a metric name.
// Following the [OpenTelemetry specs] for converting Prometheus Metric points to OTLP.
//
// [OpenTelemetry specs]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata
func TrimPromSuffixes(promName string, metricType pmetric.MetricType, unit string) string {
nameTokens := strings.Split(promName, "_")
if len(nameTokens) == 1 {
return promName
}
nameTokens = removeTypeSuffixes(nameTokens, metricType)
nameTokens = removeUnitSuffixes(nameTokens, unit)
return strings.Join(nameTokens, "_")
}
func removeTypeSuffixes(tokens []string, metricType pmetric.MetricType) []string {
switch metricType {
case pmetric.MetricTypeSum:
// Only counters are expected to have a type suffix at this point.
// for other types, suffixes are removed during scrape.
return removeSuffix(tokens, "total")
default:
return tokens
}
}
func removeUnitSuffixes(nameTokens []string, unit string) []string {
l := len(nameTokens)
unitTokens := strings.Split(unit, "_")
lu := len(unitTokens)
if lu == 0 || l <= lu {
return nameTokens
}
suffixed := true
for i := range unitTokens {
if nameTokens[l-i-1] != unitTokens[lu-i-1] {
suffixed = false
break
}
}
if suffixed {
return nameTokens[:l-lu]
}
return nameTokens
}
func removeSuffix(tokens []string, suffix string) []string {
l := len(tokens)
if tokens[l-1] == suffix {
return tokens[:l-1]
}
return tokens
}
// cleanUpUnit cleans up unit so it matches model.LabelNameRE.
func cleanUpUnit(unit string) string {
// Multiple consecutive underscores are replaced with a single underscore.
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
multipleUnderscoresRE := regexp.MustCompile(`__+`)
return strings.TrimPrefix(multipleUnderscoresRE.ReplaceAllString(
strutil.SanitizeLabelName(unit),
nonMetricNameCharRE.ReplaceAllString(unit, "_"),
"_",
), "_")
}

View file

@ -19,9 +19,7 @@ package prometheus
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/collector/pdata/pmetric"
)
func TestByte(t *testing.T) {
@ -39,6 +37,8 @@ func TestWhiteSpaces(t *testing.T) {
func TestNonStandardUnit(t *testing.T) {
require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false))
// The normal metric name character set is allowed in non-standard units.
require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), "", false))
}
func TestNonStandardUnitCounter(t *testing.T) {
@ -70,6 +70,12 @@ func TestHertz(t *testing.T) {
func TestPer(t *testing.T) {
require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false))
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false))
// The normal metric name character set is allowed in non-standard units.
require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), "", false))
t.Run("invalid per unit", func(t *testing.T) {
require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), "", false))
})
}
func TestPercent(t *testing.T) {
@ -91,7 +97,7 @@ func TestAllowUTF8(t *testing.T) {
})
t.Run("disallow UTF8", func(t *testing.T) {
require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false))
require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", false))
require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.,!* & #"), "", false))
require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false))
require.Equal(t, "metric_with_foreign_characters", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false))
})
@ -140,36 +146,6 @@ func TestOTelReceivers(t *testing.T) {
require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false))
}
func TestTrimPromSuffixes(t *testing.T) {
assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes"))
assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent"))
assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds"))
assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1"))
assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio"))
assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes"))
assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second"))
assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour"))
assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes"))
assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds"))
assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, ""))
// These are not necessarily valid OM units, only tested for the sake of completeness.
assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}"))
assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}"))
assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}"))
assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections"))
assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests"))
// Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e, a suffix "_seconds" shouldn't be removed if unit is "sec" or "s"
assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1"))
assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s"))
assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%"))
assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s"))
assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s"))
}
func TestNamespace(t *testing.T) {
require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false))
require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false))

View file

@ -50,7 +50,7 @@ const (
createdSuffix = "_created"
// maxExemplarRunes is the maximum number of UTF-8 exemplar characters
// according to the prometheus specification
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars
maxExemplarRunes = 128
// Trace and Span id keys are defined as part of the spec:
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification%2Fmetrics%2Fdatamodel.md#exemplars-2

View file

@ -2077,7 +2077,7 @@ func createTimeseriesWithOldSamples(numSamples, numSeries int, extraLabels ...la
for j := 0; j < numSamples/2; j++ {
sample := record.RefSample{
Ref: chunks.HeadSeriesRef(i),
T: int64(int(time.Now().UnixMilli()) + j),
T: time.Now().UnixMilli() + int64(j),
V: float64(i),
}
samples = append(samples, sample)

View file

@ -48,6 +48,8 @@ type writeHandler struct {
samplesAppendedWithoutMetadata prometheus.Counter
acceptedProtoMsgs map[config.RemoteWriteProtoMsg]struct{}
ingestCTZeroSample bool
}
const maxAheadTime = 10 * time.Minute
@ -57,7 +59,7 @@ const maxAheadTime = 10 * time.Minute
//
// NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible
// as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write.
func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler {
func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg, ingestCTZeroSample bool) http.Handler {
protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{}
for _, acc := range acceptedProtoMsgs {
protoMsgs[acc] = struct{}{}
@ -78,6 +80,8 @@ func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable
Name: "remote_write_without_metadata_appended_samples_total",
Help: "The total number of received remote write samples (and histogram samples) which were ingested without corresponding metadata.",
}),
ingestCTZeroSample: ingestCTZeroSample,
}
return h
}
@ -394,6 +398,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
var ref storage.SeriesRef
// Samples.
if h.ingestCTZeroSample && len(ts.Samples) > 0 && ts.Samples[0].Timestamp != 0 && ts.CreatedTimestamp != 0 {
// CT only needs to be ingested for the first sample; it will be considered
// out of order for the rest.
ref, err = app.AppendCTZeroSample(ref, ls, ts.Samples[0].Timestamp, ts.CreatedTimestamp)
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
// Even for the first sample, OOO is a common scenario because
// we can't tell if a CT was already ingested in a previous request.
// We ignore the error.
h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", ts.Samples[0].Timestamp)
}
}
for _, s := range ts.Samples {
ref, err = app.Append(ref, ls, s.GetTimestamp(), s.GetValue())
if err == nil {
@ -415,6 +430,17 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
// Native Histograms.
for _, hp := range ts.Histograms {
if h.ingestCTZeroSample && hp.Timestamp != 0 && ts.CreatedTimestamp != 0 {
// Unlike samples, we need to handle CT for each histogram instead of just the first one.
// This is because histograms and float histograms are stored separately, even if they have the same labels.
ref, err = h.handleHistogramZeroSample(app, ref, ls, hp, ts.CreatedTimestamp)
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
// Even for the first sample, OOO is a common scenario because
// we can't tell if a CT was already ingested in a previous request.
// We ignore the error.
h.logger.Debug("Error when appending CT in remote write request", "err", err, "series", ls.String(), "created_timestamp", ts.CreatedTimestamp, "timestamp", hp.Timestamp)
}
}
if hp.IsFloatHistogram() {
ref, err = app.AppendHistogram(ref, ls, hp.Timestamp, nil, hp.ToFloatHistogram())
} else {
@ -479,6 +505,18 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs *
return samplesWithoutMetadata, http.StatusBadRequest, errors.Join(badRequestErrs...)
}
// handleHistogramZeroSample appends CT as a zero-value sample with CT value as the sample timestamp.
// It doesn't return errors in case of out-of-order CT.
func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref storage.SeriesRef, l labels.Labels, hist writev2.Histogram, ct int64) (storage.SeriesRef, error) {
var err error
if hist.IsFloatHistogram() {
ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, nil, hist.ToFloatHistogram())
} else {
ref, err = app.AppendHistogramCTZeroSample(ref, l, hist.Timestamp, ct, hist.ToIntHistogram(), nil)
}
return ref, err
}
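
Taken together, the ingestCTZeroSample paths above synthesize a zero-value sample at the created timestamp just before the first real sample of a series. A rough sketch of the sequence for one float series, with illustrative timestamps and a placeholder logger variable, using only the appender calls shown in this diff:

```go
// ts.CreatedTimestamp = 1000, first real sample at t = 2000 with value 42.
ref, err := app.AppendCTZeroSample(0, ls, 2000, 1000) // injects (t=1000, v=0)
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
	// An out-of-order CT is expected when an earlier request already
	// ingested it, so only other errors are surfaced, at debug level.
	logger.Debug("Error when appending CT", "err", err)
}
ref, err = app.Append(ref, ls, 2000, 42) // the real sample follows
```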
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
// writes them to the provided appendable.
func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler {

View file

@ -130,7 +130,7 @@ func TestRemoteWriteHandlerHeadersHandling_V1Message(t *testing.T) {
}
appendable := &mockAppendable{}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -231,7 +231,7 @@ func TestRemoteWriteHandlerHeadersHandling_V2Message(t *testing.T) {
}
appendable := &mockAppendable{}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -256,7 +256,7 @@ func TestRemoteWriteHandler_V1Message(t *testing.T) {
// in Prometheus, so keeping like this to not break existing 1.0 clients.
appendable := &mockAppendable{}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -310,14 +310,23 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
expectedCode int
expectedRespBody string
commitErr error
appendSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
commitErr error
appendSampleErr error
appendCTZeroSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
ingestCTZeroSample bool
}{
{
desc: "All timeseries accepted",
desc: "All timeseries accepted/ct_enabled",
input: writeV2RequestFixture.Timeseries,
expectedCode: http.StatusNoContent,
ingestCTZeroSample: true,
},
{
desc: "All timeseries accepted/ct_disabled",
input: writeV2RequestFixture.Timeseries,
expectedCode: http.StatusNoContent,
},
@ -440,13 +449,14 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{
commitErr: tc.commitErr,
appendSampleErr: tc.appendSampleErr,
appendHistogramErr: tc.appendHistogramErr,
appendExemplarErr: tc.appendExemplarErr,
updateMetadataErr: tc.updateMetadataErr,
commitErr: tc.commitErr,
appendSampleErr: tc.appendSampleErr,
appendCTZeroSampleErr: tc.appendCTZeroSampleErr,
appendHistogramErr: tc.appendHistogramErr,
appendExemplarErr: tc.appendExemplarErr,
updateMetadataErr: tc.updateMetadataErr,
}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}, tc.ingestCTZeroSample)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -489,15 +499,27 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) {
ls := ts.ToLabels(&b, writeV2RequestFixture.Symbols)
for _, s := range ts.Samples {
if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample {
requireEqual(t, mockSample{ls, ts.CreatedTimestamp, 0}, appendable.samples[i])
i++
}
requireEqual(t, mockSample{ls, s.Timestamp, s.Value}, appendable.samples[i])
i++
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
fh := hp.ToFloatHistogram()
if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample {
requireEqual(t, mockHistogram{ls, ts.CreatedTimestamp, nil, &histogram.FloatHistogram{}}, appendable.histograms[k])
k++
}
requireEqual(t, mockHistogram{ls, hp.Timestamp, nil, fh}, appendable.histograms[k])
} else {
h := hp.ToIntHistogram()
if ts.CreatedTimestamp != 0 && tc.ingestCTZeroSample {
requireEqual(t, mockHistogram{ls, ts.CreatedTimestamp, &histogram.Histogram{}, nil}, appendable.histograms[k])
k++
}
requireEqual(t, mockHistogram{ls, hp.Timestamp, h, nil}, appendable.histograms[k])
}
k++
@ -545,7 +567,7 @@ func TestOutOfOrderSample_V1Message(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -587,7 +609,7 @@ func TestOutOfOrderExemplar_V1Message(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -625,7 +647,7 @@ func TestOutOfOrderHistogram_V1Message(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{latestSample: map[uint64]int64{labels.FromStrings("__name__", "test_metric").Hash(): 100}}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -656,7 +678,7 @@ func BenchmarkRemoteWriteHandler(b *testing.B) {
appendable := &mockAppendable{}
// TODO: test with other proto format(s)
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
b.ResetTimer()
@ -673,7 +695,7 @@ func TestCommitErr_V1Message(t *testing.T) {
require.NoError(t, err)
appendable := &mockAppendable{commitErr: errors.New("commit error")}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -697,7 +719,7 @@ func TestCommitErr_V2Message(t *testing.T) {
req.Header.Set(RemoteWriteVersionHeader, RemoteWriteVersion20HeaderValue)
appendable := &mockAppendable{commitErr: errors.New("commit error")}
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV2}, false)
recorder := httptest.NewRecorder()
handler.ServeHTTP(recorder, req)
@ -724,7 +746,7 @@ func BenchmarkRemoteWriteOOOSamples(b *testing.B) {
require.NoError(b, db.Close())
})
// TODO: test with other proto format(s)
handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1})
handler := NewWriteHandler(promslog.NewNopLogger(), nil, db.Head(), []config.RemoteWriteProtoMsg{config.RemoteWriteProtoMsgV1}, false)
buf, _, _, err := buildWriteRequest(nil, genSeriesWithSample(1000, 200*time.Minute.Milliseconds()), nil, nil, nil, nil, "snappy")
require.NoError(b, err)
@ -775,15 +797,17 @@ type mockAppendable struct {
latestExemplar map[uint64]int64
exemplars []mockExemplar
latestHistogram map[uint64]int64
latestFloatHist map[uint64]int64
histograms []mockHistogram
metadata []mockMetadata
// optional errors to inject.
commitErr error
appendSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
commitErr error
appendSampleErr error
appendCTZeroSampleErr error
appendHistogramErr error
appendExemplarErr error
updateMetadataErr error
}
type mockSample struct {
@ -827,6 +851,9 @@ func (m *mockAppendable) Appender(_ context.Context) storage.Appender {
if m.latestHistogram == nil {
m.latestHistogram = map[uint64]int64{}
}
if m.latestFloatHist == nil {
m.latestFloatHist = map[uint64]int64{}
}
if m.latestExemplar == nil {
m.latestExemplar = map[uint64]int64{}
}
@ -900,7 +927,12 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
return 0, m.appendHistogramErr
}
latestTs := m.latestHistogram[l.Hash()]
var latestTs int64
if h != nil {
latestTs = m.latestHistogram[l.Hash()]
} else {
latestTs = m.latestFloatHist[l.Hash()]
}
if t < latestTs {
return 0, storage.ErrOutOfOrderSample
}
@ -915,15 +947,53 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
return 0, tsdb.ErrInvalidSample
}
m.latestHistogram[l.Hash()] = t
if h != nil {
m.latestHistogram[l.Hash()] = t
} else {
m.latestFloatHist[l.Hash()] = t
}
m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
return 0, nil
}
func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
// AppendCTZeroSample is no-op for remote-write for now.
// TODO(bwplotka/arthursens): Add support for PRW 2.0 for CT zero feature (but also we might
// replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218).
if m.appendCTZeroSampleErr != nil {
return 0, m.appendCTZeroSampleErr
}
// Created Timestamp can't be higher than the original sample's timestamp.
if ct > t {
return 0, storage.ErrOutOfOrderSample
}
var latestTs int64
if h != nil {
latestTs = m.latestHistogram[l.Hash()]
} else {
latestTs = m.latestFloatHist[l.Hash()]
}
if ct < latestTs {
return 0, storage.ErrOutOfOrderSample
}
if ct == latestTs {
return 0, storage.ErrDuplicateSampleForTimestamp
}
if l.IsEmpty() {
return 0, tsdb.ErrInvalidSample
}
if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
return 0, tsdb.ErrInvalidSample
}
if h != nil {
m.latestHistogram[l.Hash()] = ct
m.histograms = append(m.histograms, mockHistogram{l, ct, &histogram.Histogram{}, nil})
} else {
m.latestFloatHist[l.Hash()] = ct
m.histograms = append(m.histograms, mockHistogram{l, ct, nil, &histogram.FloatHistogram{}})
}
return 0, nil
}
@ -936,9 +1006,32 @@ func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp
return 0, nil
}
func (m *mockAppendable) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) {
// AppendCTZeroSample is no-op for remote-write for now.
// TODO(bwplotka): Add support for PRW 2.0 for CT zero feature (but also we might
// replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218).
func (m *mockAppendable) AppendCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64) (storage.SeriesRef, error) {
if m.appendCTZeroSampleErr != nil {
return 0, m.appendCTZeroSampleErr
}
// Created Timestamp can't be higher than the original sample's timestamp.
if ct > t {
return 0, storage.ErrOutOfOrderSample
}
latestTs := m.latestSample[l.Hash()]
if ct < latestTs {
return 0, storage.ErrOutOfOrderSample
}
if ct == latestTs {
return 0, storage.ErrDuplicateSampleForTimestamp
}
if l.IsEmpty() {
return 0, tsdb.ErrInvalidSample
}
if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
return 0, tsdb.ErrInvalidSample
}
m.latestSample[l.Hash()] = ct
m.samples = append(m.samples, mockSample{l, ct, 0})
return 0, nil
}

View file

@ -2320,7 +2320,7 @@ func isTmpDir(fi fs.DirEntry) bool {
fn := fi.Name()
ext := filepath.Ext(fn)
if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy {
if strings.HasPrefix(fn, "checkpoint.") {
if strings.HasPrefix(fn, wlog.CheckpointPrefix) {
return true
}
if strings.HasPrefix(fn, chunkSnapshotPrefix) {

View file

@ -4101,7 +4101,7 @@ func TestOOOWALWrite(t *testing.T) {
},
"integer histogram": {
appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) {
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestHistogram(int(mins)), nil)
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestHistogram(mins), nil)
require.NoError(t, err)
return seriesRef, nil
},
@ -4192,7 +4192,7 @@ func TestOOOWALWrite(t *testing.T) {
},
"float histogram": {
appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) {
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestFloatHistogram(int(mins)))
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestFloatHistogram(mins))
require.NoError(t, err)
return seriesRef, nil
},
@ -4283,7 +4283,7 @@ func TestOOOWALWrite(t *testing.T) {
},
"custom buckets histogram": {
appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) {
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestCustomBucketsHistogram(int(mins)), nil)
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestCustomBucketsHistogram(mins), nil)
require.NoError(t, err)
return seriesRef, nil
},
@ -4374,7 +4374,7 @@ func TestOOOWALWrite(t *testing.T) {
},
"custom buckets float histogram": {
appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) {
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(mins)))
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestCustomBucketsFloatHistogram(mins))
require.NoError(t, err)
return seriesRef, nil
},
@ -4918,12 +4918,12 @@ func TestMultipleEncodingsCommitOrder(t *testing.T) {
return sample{t: ts, f: float64(ts)}
}
if valType == chunkenc.ValHistogram {
h := tsdbutil.GenerateTestHistogram(int(ts))
h := tsdbutil.GenerateTestHistogram(ts)
_, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
require.NoError(t, err)
return sample{t: ts, h: h}
}
fh := tsdbutil.GenerateTestFloatHistogram(int(ts))
fh := tsdbutil.GenerateTestFloatHistogram(ts)
_, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh)
require.NoError(t, err)
return sample{t: ts, fh: fh}
@ -5609,37 +5609,37 @@ func TestQuerierOOOQuery(t *testing.T) {
},
"integer histogram": {
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
h := tsdbutil.GenerateTestHistogram(int(ts))
h := tsdbutil.GenerateTestHistogram(ts)
if counterReset {
h.CounterResetHint = histogram.CounterReset
}
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
},
sampleFunc: func(ts int64) chunks.Sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"float histogram": {
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
fh := tsdbutil.GenerateTestFloatHistogram(int(ts))
fh := tsdbutil.GenerateTestFloatHistogram(ts)
if counterReset {
fh.CounterResetHint = histogram.CounterReset
}
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh)
},
sampleFunc: func(ts int64) chunks.Sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))}
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
},
},
"integer histogram counter resets": {
// Adding counter reset to all histograms means each histogram will have its own chunk.
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
h := tsdbutil.GenerateTestHistogram(int(ts))
h := tsdbutil.GenerateTestHistogram(ts)
h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument.
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
},
sampleFunc: func(ts int64) chunks.Sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
}
@ -5925,37 +5925,37 @@ func TestChunkQuerierOOOQuery(t *testing.T) {
},
"integer histogram": {
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
h := tsdbutil.GenerateTestHistogram(int(ts))
h := tsdbutil.GenerateTestHistogram(ts)
if counterReset {
h.CounterResetHint = histogram.CounterReset
}
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
},
sampleFunc: func(ts int64) chunks.Sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"float histogram": {
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
fh := tsdbutil.GenerateTestFloatHistogram(int(ts))
fh := tsdbutil.GenerateTestFloatHistogram(ts)
if counterReset {
fh.CounterResetHint = histogram.CounterReset
}
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh)
},
sampleFunc: func(ts int64) chunks.Sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))}
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
},
},
"integer histogram counter resets": {
// Adding counter reset to all histograms means each histogram will have its own chunk.
appendFunc: func(app storage.Appender, ts int64, counterReset bool) (storage.SeriesRef, error) {
h := tsdbutil.GenerateTestHistogram(int(ts))
h := tsdbutil.GenerateTestHistogram(ts)
h.CounterResetHint = histogram.CounterReset // For this scenario, ignore the counterReset argument.
return app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
},
sampleFunc: func(ts int64) chunks.Sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"integer histogram with recode": {
@ -7128,7 +7128,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
app := db.Appender(context.Background())
tsMs := ts * time.Minute.Milliseconds()
if floatHistogram {
h := tsdbutil.GenerateTestFloatHistogram(val)
h := tsdbutil.GenerateTestFloatHistogram(int64(val))
h.CounterResetHint = hint
_, err = app.AppendHistogram(0, l, tsMs, nil, h)
require.NoError(t, err)
@ -7136,7 +7136,7 @@ func TestOOOHistogramCompactionWithCounterResets(t *testing.T) {
return sample{t: tsMs, fh: h.Copy()}
}
h := tsdbutil.GenerateTestHistogram(val)
h := tsdbutil.GenerateTestHistogram(int64(val))
h.CounterResetHint = hint
_, err = app.AppendHistogram(0, l, tsMs, h, nil)
require.NoError(t, err)
@ -7487,14 +7487,14 @@ func TestInterleavedInOrderAndOOOHistogramCompactionWithCounterResets(t *testing
app := db.Appender(context.Background())
tsMs := ts
if floatHistogram {
h := tsdbutil.GenerateTestFloatHistogram(val)
h := tsdbutil.GenerateTestFloatHistogram(int64(val))
_, err = app.AppendHistogram(0, l, tsMs, nil, h)
require.NoError(t, err)
require.NoError(t, app.Commit())
return sample{t: tsMs, fh: h.Copy()}
}
h := tsdbutil.GenerateTestHistogram(val)
h := tsdbutil.GenerateTestHistogram(int64(val))
_, err = app.AppendHistogram(0, l, tsMs, h, nil)
require.NoError(t, err)
require.NoError(t, app.Commit())

View file

@ -126,7 +126,7 @@ The first row stores the starting id and the starting timestamp.
Series reference and timestamp are encoded as deltas w.r.t. the first exemplar.
The first exemplar record begins at the second row.
See: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
See: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars
```
┌──────────────────────────────────────────────────────────────────┐

View file

@ -1048,7 +1048,7 @@ func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *ind
return h.cardinalityCache
}
h.cardinalityCacheKey = cacheKey
h.cardinalityCache = h.postings.Stats(statsByLabelName, limit)
h.cardinalityCache = h.postings.Stats(statsByLabelName, limit, labels.SizeOfLabels)
h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second
return h.cardinalityCache
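
The new third argument lets the caller supply the size model for the label-value-length statistic; labels.SizeOfLabels matches the func(name, value string, n uint64) uint64 shape used by MemPostings.Stats below. As a hedged sketch, the previous fixed accounting (value bytes times series count) could be recovered with a custom function; legacySize here is hypothetical:

```go
// Hypothetical size function reproducing the old Stats accounting:
// only the label value's bytes, counted once per series.
legacySize := func(name, value string, n uint64) uint64 {
	return uint64(len(value)) * n
}
stats := h.postings.Stats(statsByLabelName, limit, legacySize)
_ = stats
```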

View file

@ -103,20 +103,7 @@ func (h *headIndexReader) LabelNames(ctx context.Context, matchers ...*labels.Ma
// Postings returns the postings list iterator for the label pairs.
func (h *headIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) {
switch len(values) {
case 0:
return index.EmptyPostings(), nil
case 1:
return h.head.postings.Get(name, values[0]), nil
default:
res := make([]index.Postings, 0, len(values))
for _, value := range values {
if p := h.head.postings.Get(name, value); !index.IsEmptyPostingsType(p) {
res = append(res, p)
}
}
return index.Merge(ctx, res...), nil
}
return h.head.postings.Postings(ctx, name, values...), nil
}
func (h *headIndexReader) PostingsForLabelMatching(ctx context.Context, name string, match func(string) bool) index.Postings {

View file

@ -962,12 +962,12 @@ func TestHead_Truncate(t *testing.T) {
require.Nil(t, h.series.getByID(s3.ref))
require.Nil(t, h.series.getByID(s4.ref))
postingsA1, _ := index.ExpandPostings(h.postings.Get("a", "1"))
postingsA2, _ := index.ExpandPostings(h.postings.Get("a", "2"))
postingsB1, _ := index.ExpandPostings(h.postings.Get("b", "1"))
postingsB2, _ := index.ExpandPostings(h.postings.Get("b", "2"))
postingsC1, _ := index.ExpandPostings(h.postings.Get("c", "1"))
postingsAll, _ := index.ExpandPostings(h.postings.Get("", ""))
postingsA1, _ := index.ExpandPostings(h.postings.Postings(ctx, "a", "1"))
postingsA2, _ := index.ExpandPostings(h.postings.Postings(ctx, "a", "2"))
postingsB1, _ := index.ExpandPostings(h.postings.Postings(ctx, "b", "1"))
postingsB2, _ := index.ExpandPostings(h.postings.Postings(ctx, "b", "2"))
postingsC1, _ := index.ExpandPostings(h.postings.Postings(ctx, "c", "1"))
postingsAll, _ := index.ExpandPostings(h.postings.Postings(ctx, "", ""))
require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s1.ref)}, postingsA1)
require.Equal(t, []storage.SeriesRef{storage.SeriesRef(s2.ref)}, postingsA2)
@ -4732,7 +4732,7 @@ func TestOOOHistogramCounterResetHeaders(t *testing.T) {
// OOO histogram
for i := 1; i <= 5; i++ {
appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+i))
appendHistogram(100+int64(i), tsdbutil.GenerateTestHistogram(1000+int64(i)))
}
// Nothing mmapped yet.
checkOOOExpCounterResetHeader()
@ -4820,7 +4820,7 @@ func TestOOOHistogramCounterResetHeaders(t *testing.T) {
appendHistogram(300, tsdbutil.SetHistogramCounterReset(tsdbutil.GenerateTestHistogram(3000)))
for i := 1; i <= 4; i++ {
appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+i))
appendHistogram(300+int64(i), tsdbutil.GenerateTestHistogram(3000+int64(i)))
}
// One mmapped chunk with (ts, val) [(300, 3000), (301, 3001), (302, 3002), (303, 3003), (350, 4000)].

View file

@ -72,7 +72,7 @@ type MemPostings struct {
// lvs holds the label values for each label name.
// lvs[name] is essentially an unsorted append-only list of all keys in m[name]
// mtx must be held when interacting with lvs.
// Since it's append-only, it is safe to the label values slice after releasing the lock.
// Since it's append-only, it is safe to read the label values slice after releasing the lock.
lvs map[string][]string
ordered bool
@ -190,7 +190,8 @@ type PostingsStats struct {
}
// Stats calculates the cardinality statistics from postings.
func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
// Caller can pass in a function which computes the space required for n series with a given label.
func (p *MemPostings) Stats(label string, limit int, labelSizeFunc func(string, string, uint64) uint64) *PostingsStats {
var size uint64
p.mtx.RLock()
@ -218,7 +219,7 @@ func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
}
seriesCnt := uint64(len(values))
labelValuePairs.push(Stat{Name: n + "=" + name, Count: seriesCnt})
size += uint64(len(name)) * seriesCnt
size += labelSizeFunc(n, name, seriesCnt)
}
labelValueLength.push(Stat{Name: n, Count: size})
}
@ -234,25 +235,9 @@ func (p *MemPostings) Stats(label string, limit int) *PostingsStats {
}
}
// Get returns a postings list for the given label pair.
func (p *MemPostings) Get(name, value string) Postings {
var lp []storage.SeriesRef
p.mtx.RLock()
l := p.m[name]
if l != nil {
lp = l[value]
}
p.mtx.RUnlock()
if lp == nil {
return EmptyPostings()
}
return newListPostings(lp...)
}
// All returns a postings list over all documents ever added.
func (p *MemPostings) All() Postings {
return p.Get(AllPostingsKey())
return p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value)
}
// EnsureOrder ensures that all postings lists are sorted. After it returns all further
@ -489,7 +474,7 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string,
}
// Now `vals` only contains the values that matched, get their postings.
its := make([]Postings, 0, len(vals))
its := make([]*ListPostings, 0, len(vals))
lps := make([]ListPostings, len(vals))
p.mtx.RLock()
e := p.m[name]
@ -509,11 +494,27 @@ func (p *MemPostings) PostingsForLabelMatching(ctx context.Context, name string,
return Merge(ctx, its...)
}
// Postings returns a postings iterator for the given label values.
func (p *MemPostings) Postings(ctx context.Context, name string, values ...string) Postings {
res := make([]*ListPostings, 0, len(values))
lps := make([]ListPostings, len(values))
p.mtx.RLock()
postingsMapForName := p.m[name]
for i, value := range values {
if lp := postingsMapForName[value]; lp != nil {
lps[i] = ListPostings{list: lp}
res = append(res, &lps[i])
}
}
p.mtx.RUnlock()
return Merge(ctx, res...)
}
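A usage sketch for the new Postings method (illustrative, import paths assumed): it takes the read-lock once for any number of values and merges the concrete *ListPostings directly.

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/index"
)

func main() {
	p := index.NewMemPostings()
	p.Add(1, labels.FromStrings("job", "api"))
	p.Add(2, labels.FromStrings("job", "web"))

	// One lock acquisition covers all requested values; values with no
	// postings are skipped rather than contributing empty iterators.
	it := p.Postings(context.Background(), "job", "api", "web")
	for it.Next() {
		fmt.Println(it.At()) // 1, then 2
	}
}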
func (p *MemPostings) PostingsForAllLabelValues(ctx context.Context, name string) Postings {
p.mtx.RLock()
e := p.m[name]
its := make([]Postings, 0, len(e))
its := make([]*ListPostings, 0, len(e))
lps := make([]ListPostings, len(e))
i := 0
for _, refs := range e {
@ -659,7 +660,7 @@ func (it *intersectPostings) Err() error {
}
// Merge returns a new iterator over the union of the input iterators.
func Merge(_ context.Context, its ...Postings) Postings {
func Merge[T Postings](_ context.Context, its ...T) Postings {
if len(its) == 0 {
return EmptyPostings()
}
@ -674,19 +675,19 @@ func Merge(_ context.Context, its ...Postings) Postings {
return p
}
type mergedPostings struct {
p []Postings
h *loser.Tree[storage.SeriesRef, Postings]
type mergedPostings[T Postings] struct {
p []T
h *loser.Tree[storage.SeriesRef, T]
cur storage.SeriesRef
}
func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
func newMergedPostings[T Postings](p []T) (m *mergedPostings[T], nonEmpty bool) {
const maxVal = storage.SeriesRef(math.MaxUint64) // This value must be higher than all real values used in the tree.
lt := loser.New(p, maxVal)
return &mergedPostings{p: p, h: lt}, true
return &mergedPostings[T]{p: p, h: lt}, true
}
func (it *mergedPostings) Next() bool {
func (it *mergedPostings[T]) Next() bool {
for {
if !it.h.Next() {
return false
@ -700,7 +701,7 @@ func (it *mergedPostings) Next() bool {
}
}
func (it *mergedPostings) Seek(id storage.SeriesRef) bool {
func (it *mergedPostings[T]) Seek(id storage.SeriesRef) bool {
for !it.h.IsEmpty() && it.h.At() < id {
finished := !it.h.Winner().Seek(id)
it.h.Fix(finished)
@ -712,11 +713,11 @@ func (it *mergedPostings) Seek(id storage.SeriesRef) bool {
return true
}
func (it mergedPostings) At() storage.SeriesRef {
func (it mergedPostings[T]) At() storage.SeriesRef {
return it.cur
}
func (it mergedPostings) Err() error {
func (it mergedPostings[T]) Err() error {
for _, p := range it.p {
if err := p.Err(); err != nil {
return err
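Making Merge generic over T Postings means a caller holding a concrete slice type (such as the []*ListPostings built above) no longer has to copy it into a []Postings first. A small sketch, assuming NewListPostings is the exported list-postings constructor in this package:

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/index"
)

func main() {
	a := index.NewListPostings([]storage.SeriesRef{1, 3, 5})
	b := index.NewListPostings([]storage.SeriesRef{2, 3, 6})

	// Here T is inferred as the interface type Postings; inside MemPostings
	// it is inferred as *ListPostings, avoiding an interface-slice conversion
	// on the hot path. Duplicates (3) are emitted once.
	merged := index.Merge(context.Background(), a, b)
	for merged.Next() {
		fmt.Print(merged.At(), " ") // 1 2 3 5 6
	}
	fmt.Println()
}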

View file

@ -392,8 +392,8 @@ func BenchmarkMerge(t *testing.B) {
refs = append(refs, temp)
}
its := make([]Postings, len(refs))
for _, nSeries := range []int{1, 10, 100, 1000, 10000, 100000} {
its := make([]*ListPostings, len(refs))
for _, nSeries := range []int{1, 10, 10000, 100000} {
t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
ctx := context.Background()
for i := 0; i < bench.N; i++ {
@ -939,7 +939,7 @@ func BenchmarkPostings_Stats(b *testing.B) {
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
p.Stats("__name__", 10)
p.Stats("__name__", 10, labels.SizeOfLabels)
}
}
@ -954,7 +954,8 @@ func TestMemPostingsStats(t *testing.T) {
p.Add(2, labels.FromStrings("label", "value1"))
// call the Stats method to calculate the cardinality statistics
stats := p.Stats("label", 10)
// Pass in a fake size calculation so we get the same result regardless of compile tags.
stats := p.Stats("label", 10, func(name, value string, n uint64) uint64 { return uint64(len(name)+len(value)) * n })
// assert that the expected statistics were calculated
require.Equal(t, uint64(2), stats.CardinalityMetricsStats[0].Count)
@ -963,7 +964,7 @@ func TestMemPostingsStats(t *testing.T) {
require.Equal(t, uint64(3), stats.CardinalityLabelStats[0].Count)
require.Equal(t, "label", stats.CardinalityLabelStats[0].Name)
require.Equal(t, uint64(24), stats.LabelValueStats[0].Count)
require.Equal(t, uint64(44), stats.LabelValueStats[0].Count)
require.Equal(t, "label", stats.LabelValueStats[0].Name)
require.Equal(t, uint64(2), stats.LabelValuePairsStats[0].Count)
@ -978,7 +979,7 @@ func TestMemPostings_Delete(t *testing.T) {
p.Add(2, labels.FromStrings("lbl1", "b"))
p.Add(3, labels.FromStrings("lbl2", "a"))
before := p.Get(allPostingsKey.Name, allPostingsKey.Value)
before := p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value)
deletedRefs := map[storage.SeriesRef]struct{}{
2: {},
}
@ -986,7 +987,7 @@ func TestMemPostings_Delete(t *testing.T) {
{Name: "lbl1", Value: "b"}: {},
}
p.Delete(deletedRefs, affectedLabels)
after := p.Get(allPostingsKey.Name, allPostingsKey.Value)
after := p.Postings(context.Background(), allPostingsKey.Name, allPostingsKey.Value)
// Make sure postings gotten before the delete have the old data when
// iterated over.
@ -1000,7 +1001,7 @@ func TestMemPostings_Delete(t *testing.T) {
require.NoError(t, err)
require.Equal(t, []storage.SeriesRef{1, 3}, expanded)
deleted := p.Get("lbl1", "b")
deleted := p.Postings(context.Background(), "lbl1", "b")
expanded, err = ExpandPostings(deleted)
require.NoError(t, err)
require.Empty(t, expanded, "expected empty postings, got %v", expanded)
@ -1072,7 +1073,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) {
return
default:
// Get a random value of this label.
p.Get(lbl, itoa(rand.Intn(10000))).Next()
p.Postings(context.Background(), lbl, itoa(rand.Intn(10000))).Next()
}
}
}(i)
@ -1409,12 +1410,15 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) {
slowRegexp := "^" + slowRegexpString() + "$"
b.Logf("Slow regexp length = %d", len(slowRegexp))
slow := regexp.MustCompile(slowRegexp)
const seriesPerLabel = 10
for _, labelValueCount := range []int{1_000, 10_000, 100_000} {
b.Run(fmt.Sprintf("labels=%d", labelValueCount), func(b *testing.B) {
mp := NewMemPostings()
for i := 0; i < labelValueCount; i++ {
mp.Add(storage.SeriesRef(i), labels.FromStrings("label", strconv.Itoa(i)))
for j := 0; j < seriesPerLabel; j++ {
mp.Add(storage.SeriesRef(i*seriesPerLabel+j), labels.FromStrings("__name__", strconv.Itoa(j), "label", strconv.Itoa(i)))
}
}
fp, err := ExpandPostings(mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString))
@ -1434,6 +1438,18 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) {
mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString).Next()
}
})
b.Run("matcher=all", func(b *testing.B) {
for i := 0; i < b.N; i++ {
// Match everything.
p := mp.PostingsForLabelMatching(context.Background(), "label", func(_ string) bool { return true })
var sum storage.SeriesRef
// Iterate through all results to exercise merge function.
for p.Next() {
sum += p.At()
}
}
})
})
}
}

View file

@ -54,12 +54,12 @@ func TestOOOInsert(t *testing.T) {
},
"integer histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"float histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))}
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
},
},
}
@ -118,12 +118,12 @@ func TestOOOInsertDuplicate(t *testing.T) {
},
"integer histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(ts))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(ts)}
},
},
"float histogram": {
sampleFunc: func(ts int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(ts))}
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(ts)}
},
},
}

View file

@ -271,7 +271,7 @@ func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matc
its = append(its, it)
case m.Type == labels.MatchNotRegexp && m.Value == ".+":
// .+ regexp matches any non-empty string: get postings for all label values and remove them.
its = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name))
notIts = append(notIts, ix.PostingsForAllLabelValues(ctx, m.Name))
case labelMustBeSet[m.Name]:
// If this matcher must be non-empty, we can be smarter.
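The fix above appends the all-values postings to notIts (the subtraction set) instead of clobbering its. Semantically, i!~".+" keeps only series where label i is empty or absent; a quick check of the matcher semantics using only the exported labels API (a sketch, not from the diff):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// ".+" matches any non-empty string, so the negated matcher keeps empty
	// values; the postings for all concrete values of "i" must be removed.
	m := labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".+")
	fmt.Println(m.Matches(""))    // true: series without label i survive
	fmt.Println(m.Matches("abc")) // false: any real value is subtracted
}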

View file

@ -1826,12 +1826,12 @@ func checkCurrVal(t *testing.T, valType chunkenc.ValueType, it *populateWithDelS
ts, h := it.AtHistogram(nil)
require.Equal(t, int64(expectedTs), ts)
h.CounterResetHint = histogram.UnknownCounterReset
require.Equal(t, tsdbutil.GenerateTestHistogram(expectedValue), h)
require.Equal(t, tsdbutil.GenerateTestHistogram(int64(expectedValue)), h)
case chunkenc.ValFloatHistogram:
ts, h := it.AtFloatHistogram(nil)
require.Equal(t, int64(expectedTs), ts)
h.CounterResetHint = histogram.UnknownCounterReset
require.Equal(t, tsdbutil.GenerateTestFloatHistogram(expectedValue), h)
require.Equal(t, tsdbutil.GenerateTestFloatHistogram(int64(expectedValue)), h)
default:
panic("unexpected value type")
}
@ -3017,6 +3017,15 @@ func TestPostingsForMatchers(t *testing.T) {
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "n", "1"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", "^.*$")},
exp: []labels.Labels{},
},
// Test shortcut i!~".+"
{
matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "n", ".*"), labels.MustNewMatcher(labels.MatchNotRegexp, "i", ".+")},
exp: []labels.Labels{
labels.FromStrings("n", "1"),
labels.FromStrings("n", "2"),
labels.FromStrings("n", "2.5"),
},
},
}
ir, err := h.Index()
@ -3579,16 +3588,16 @@ func TestQueryWithDeletedHistograms(t *testing.T) {
ctx := context.Background()
testcases := map[string]func(int) (*histogram.Histogram, *histogram.FloatHistogram){
"intCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestHistogram(i), nil
return tsdbutil.GenerateTestHistogram(int64(i)), nil
},
"intgauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return tsdbutil.GenerateTestGaugeHistogram(rand.Int() % 1000), nil
return tsdbutil.GenerateTestGaugeHistogram(rand.Int63() % 1000), nil
},
"floatCounter": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return nil, tsdbutil.GenerateTestFloatHistogram(i)
return nil, tsdbutil.GenerateTestFloatHistogram(int64(i))
},
"floatGauge": func(i int) (*histogram.Histogram, *histogram.FloatHistogram) {
return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int() % 1000)
return nil, tsdbutil.GenerateTestGaugeFloatHistogram(rand.Int63() % 1000)
},
}

View file

@ -65,67 +65,67 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
intHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
s := sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(int(value))}
return sample{t: ts, h: tsdbutil.GenerateTestHistogram(value)}
},
},
floatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
s := sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(int(value))}
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)}
},
},
customBucketsIntHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(int(value))}
s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(int(value))}
return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
},
},
customBucketsFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(value))}
s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(int(value))}
return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
},
},
gaugeIntHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
s := sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(int(value))}
return sample{t: ts, h: tsdbutil.GenerateTestGaugeHistogram(value)}
},
},
gaugeFloatHistogram: {
sampleType: sampleMetricTypeHistogram,
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
s := sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)}
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
return ref, s, err
},
sampleFunc: func(ts, value int64) sample {
return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(int(value))}
return sample{t: ts, fh: tsdbutil.GenerateTestGaugeFloatHistogram(value)}
},
},
}

View file

@ -21,7 +21,7 @@ import (
func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
for i := 0; i < n; i++ {
h := GenerateTestHistogram(i)
h := GenerateTestHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
@ -31,13 +31,13 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) {
}
func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram {
h := GenerateTestHistogram(n)
h := GenerateTestHistogram(int64(n))
h.CounterResetHint = hint
return h
}
// GenerateTestHistogram generates a test histogram, but it is up to the caller to set any known counter reset hint.
func GenerateTestHistogram(i int) *histogram.Histogram {
func GenerateTestHistogram(i int64) *histogram.Histogram {
return &histogram.Histogram{
Count: 12 + uint64(i*9),
ZeroCount: 2 + uint64(i),
@ -48,18 +48,18 @@ func GenerateTestHistogram(i int) *histogram.Histogram {
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
PositiveBuckets: []int64{i + 1, 1, -1, 0},
NegativeSpans: []histogram.Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{int64(i + 1), 1, -1, 0},
NegativeBuckets: []int64{i + 1, 1, -1, 0},
}
}
func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) {
for i := 0; i < n; i++ {
h := GenerateTestCustomBucketsHistogram(i)
h := GenerateTestCustomBucketsHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
@ -68,7 +68,7 @@ func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) {
return r
}
func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram {
func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram {
return &histogram.Histogram{
Count: 5 + uint64(i*4),
Sum: 18.4 * float64(i+1),
@ -77,20 +77,20 @@ func GenerateTestCustomBucketsHistogram(i int) *histogram.Histogram {
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
PositiveBuckets: []int64{i + 1, 1, -1, 0},
CustomValues: []float64{0, 1, 2, 3, 4},
}
}
func GenerateTestGaugeHistograms(n int) (r []*histogram.Histogram) {
for x := 0; x < n; x++ {
i := int(math.Sin(float64(x))*100) + 100
i := int64(math.Sin(float64(x))*100) + 100
r = append(r, GenerateTestGaugeHistogram(i))
}
return r
}
func GenerateTestGaugeHistogram(i int) *histogram.Histogram {
func GenerateTestGaugeHistogram(i int64) *histogram.Histogram {
h := GenerateTestHistogram(i)
h.CounterResetHint = histogram.GaugeType
return h
@ -98,7 +98,7 @@ func GenerateTestGaugeHistogram(i int) *histogram.Histogram {
func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for i := 0; i < n; i++ {
h := GenerateTestFloatHistogram(i)
h := GenerateTestFloatHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
@ -108,7 +108,7 @@ func GenerateTestFloatHistograms(n int) (r []*histogram.FloatHistogram) {
}
// GenerateTestFloatHistogram generates a test float histogram, but it is up to the caller to set any known counter reset hint.
func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 12 + float64(i*9),
ZeroCount: 2 + float64(i),
@ -130,7 +130,7 @@ func GenerateTestFloatHistogram(i int) *histogram.FloatHistogram {
func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for i := 0; i < n; i++ {
h := GenerateTestCustomBucketsFloatHistogram(i)
h := GenerateTestCustomBucketsFloatHistogram(int64(i))
if i > 0 {
h.CounterResetHint = histogram.NotCounterReset
}
@ -139,7 +139,7 @@ func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistog
return r
}
func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram {
func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram {
return &histogram.FloatHistogram{
Count: 5 + float64(i*4),
Sum: 18.4 * float64(i+1),
@ -155,13 +155,13 @@ func GenerateTestCustomBucketsFloatHistogram(i int) *histogram.FloatHistogram {
func GenerateTestGaugeFloatHistograms(n int) (r []*histogram.FloatHistogram) {
for x := 0; x < n; x++ {
i := int(math.Sin(float64(x))*100) + 100
i := int64(math.Sin(float64(x))*100) + 100
r = append(r, GenerateTestGaugeFloatHistogram(i))
}
return r
}
func GenerateTestGaugeFloatHistogram(i int) *histogram.FloatHistogram {
func GenerateTestGaugeFloatHistogram(i int64) *histogram.FloatHistogram {
h := GenerateTestFloatHistogram(i)
h.CounterResetHint = histogram.GaugeType
return h
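With the generators now taking int64, call sites that derive the seed from a timestamp no longer need an int round-trip, which would truncate on builds where int is 32 bits wide. A minimal sketch of the new call shape (import path assumed):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/tsdbutil"
)

func main() {
	var ts int64 = 1_000_000
	// The timestamp can seed the histogram directly; previously this required
	// int(ts), which is lossy on 32-bit platforms.
	h := tsdbutil.GenerateTestHistogram(ts)
	fh := tsdbutil.GenerateTestFloatHistogram(ts)
	fmt.Println(h.Count, fh.Count)
}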

View file

@ -81,7 +81,8 @@ func DeleteCheckpoints(dir string, maxIndex int) error {
return errs.Err()
}
const checkpointPrefix = "checkpoint."
// CheckpointPrefix is the prefix used for checkpoint directories.
const CheckpointPrefix = "checkpoint."
// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
@ -395,7 +396,7 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
}
func checkpointDir(dir string, i int) string {
return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i))
return filepath.Join(dir, fmt.Sprintf(CheckpointPrefix+"%08d", i))
}
type checkpointRef struct {
@ -411,13 +412,13 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) {
for i := 0; i < len(files); i++ {
fi := files[i]
if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
if !strings.HasPrefix(fi.Name(), CheckpointPrefix) {
continue
}
if !fi.IsDir() {
return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name())
}
idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
idx, err := strconv.Atoi(fi.Name()[len(CheckpointPrefix):])
if err != nil {
continue
}
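Exporting the prefix lets code outside the wlog package recognize checkpoint directories inside a WAL directory. A sketch under assumed conditions (the package lives at github.com/prometheus/prometheus/tsdb/wlog; data/wal is a hypothetical local path):

package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/prometheus/prometheus/tsdb/wlog"
)

func main() {
	entries, err := os.ReadDir("data/wal") // hypothetical WAL location
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		// Checkpoints are directories named checkpoint.<8-digit index>.
		if e.IsDir() && strings.HasPrefix(e.Name(), wlog.CheckpointPrefix) {
			fmt.Println("found checkpoint:", e.Name())
		}
	}
}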

View file

@ -148,6 +148,7 @@ var (
HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo)
IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo)
HistogramIgnoredInAggregationInfo = fmt.Errorf("%w: ignored histogram in", PromQLInfo)
HistogramIgnoredInMixedRangeInfo = fmt.Errorf("%w: ignored histograms in a range containing both floats and histograms for metric name", PromQLInfo)
)
type annoErr struct {
@ -293,3 +294,10 @@ func NewHistogramIgnoredInAggregationInfo(aggregation string, pos posrange.Posit
Err: fmt.Errorf("%w %s aggregation", HistogramIgnoredInAggregationInfo, aggregation),
}
}
func NewHistogramIgnoredInMixedRangeInfo(metricName string, pos posrange.PositionRange) error {
return annoErr{
PositionRange: pos,
Err: fmt.Errorf("%w %q", HistogramIgnoredInMixedRangeInfo, metricName),
}
}
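The new info annotation follows the same constructor pattern as the existing ones: wrap the sentinel error with the metric name and position info. A sketch of emitting and reading it (import paths assumed; Annotations.Add is assumed to initialize a nil map):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser/posrange"
	"github.com/prometheus/prometheus/util/annotations"
)

func main() {
	var annos annotations.Annotations
	// Emitted when a range selection mixes floats and histograms and the
	// histograms are dropped from the calculation.
	annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(
		"http_request_duration_seconds", posrange.PositionRange{Start: 0, End: 10}))

	for _, err := range annos {
		fmt.Println(err)
	}
}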

View file

@ -49,8 +49,8 @@ func (c *MockContext) Value(interface{}) interface{} {
// MockContextErrAfter is a MockContext that will return an error after a certain
// number of calls to Err().
type MockContextErrAfter struct {
count atomic.Uint64
MockContext
count atomic.Uint64
FailAfter uint64
}

View file

@ -258,6 +258,7 @@ func NewAPI(
rwEnabled bool,
acceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg,
otlpEnabled bool,
ctZeroIngestionEnabled bool,
) *API {
a := &API{
QueryEngine: qe,
@ -301,7 +302,7 @@ func NewAPI(
}
if rwEnabled {
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs)
a.remoteWriteHandler = remote.NewWriteHandler(logger, registerer, ap, acceptRemoteWriteProtoMsgs, ctZeroIngestionEnabled)
}
if otlpEnabled {
a.otlpWriteHandler = remote.NewOTLPWriteHandler(logger, ap, configFunc)
@ -1083,12 +1084,12 @@ func (api *API) targets(r *http.Request) apiFuncResult {
showActive := state == "" || state == "any" || state == "active"
showDropped := state == "" || state == "any" || state == "dropped"
res := &TargetDiscovery{}
builder := labels.NewBuilder(labels.EmptyLabels())
if showActive {
targetsActive := api.targetRetriever(r.Context()).TargetsActive()
activeKeys, numTargets := sortKeys(targetsActive)
res.ActiveTargets = make([]*Target, 0, numTargets)
builder := labels.NewScratchBuilder(0)
for _, key := range activeKeys {
if scrapePool != "" && key != scrapePool {
@ -1104,8 +1105,8 @@ func (api *API) targets(r *http.Request) apiFuncResult {
globalURL, err := getGlobalURL(target.URL(), api.globalURLOptions)
res.ActiveTargets = append(res.ActiveTargets, &Target{
DiscoveredLabels: target.DiscoveredLabels(),
Labels: target.Labels(&builder),
DiscoveredLabels: target.DiscoveredLabels(builder),
Labels: target.Labels(builder),
ScrapePool: key,
ScrapeURL: target.URL().String(),
GlobalURL: globalURL.String(),
@ -1143,7 +1144,7 @@ func (api *API) targets(r *http.Request) apiFuncResult {
}
for _, target := range targetsDropped[key] {
res.DroppedTargets = append(res.DroppedTargets, &DroppedTarget{
DiscoveredLabels: target.DiscoveredLabels(),
DiscoveredLabels: target.DiscoveredLabels(builder),
})
}
}
@ -1181,7 +1182,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult {
}
}
builder := labels.NewScratchBuilder(0)
builder := labels.NewBuilder(labels.EmptyLabels())
metric := r.FormValue("metric")
res := []metricMetadata{}
for _, tt := range api.targetRetriever(r.Context()).TargetsActive() {
@ -1189,7 +1190,7 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult {
if limit >= 0 && len(res) >= limit {
break
}
targetLabels := t.Labels(&builder)
targetLabels := t.Labels(builder)
// Filter targets that don't satisfy the label matchers.
if matchTarget != "" && !matchLabels(targetLabels, matchers) {
continue

View file

@ -103,12 +103,12 @@ type testTargetRetriever struct {
}
type testTargetParams struct {
Identifier string
Labels labels.Labels
DiscoveredLabels labels.Labels
Params url.Values
Reports []*testReport
Active bool
Identifier string
Labels labels.Labels
targetLabels model.LabelSet
Params url.Values
Reports []*testReport
Active bool
}
type testReport struct {
@ -124,7 +124,7 @@ func newTestTargetRetriever(targetsInfo []*testTargetParams) *testTargetRetrieve
droppedTargets = make(map[string][]*scrape.Target)
for _, t := range targetsInfo {
nt := scrape.NewTarget(t.Labels, t.DiscoveredLabels, t.Params)
nt := scrape.NewTarget(t.Labels, &config.ScrapeConfig{Params: t.Params}, t.targetLabels, nil)
for _, r := range t.Reports {
nt.Report(r.Start, r.Duration, r.Error)
@ -1004,10 +1004,9 @@ func setupTestTargetRetriever(t *testing.T) *testTargetRetriever {
model.ScrapeIntervalLabel: "15s",
model.ScrapeTimeoutLabel: "5s",
}),
DiscoveredLabels: labels.EmptyLabels(),
Params: url.Values{},
Reports: []*testReport{{scrapeStart, 70 * time.Millisecond, nil}},
Active: true,
Params: url.Values{},
Reports: []*testReport{{scrapeStart, 70 * time.Millisecond, nil}},
Active: true,
},
{
Identifier: "blackbox",
@ -1019,22 +1018,21 @@ func setupTestTargetRetriever(t *testing.T) *testTargetRetriever {
model.ScrapeIntervalLabel: "20s",
model.ScrapeTimeoutLabel: "10s",
}),
DiscoveredLabels: labels.EmptyLabels(),
Params: url.Values{"target": []string{"example.com"}},
Reports: []*testReport{{scrapeStart, 100 * time.Millisecond, errors.New("failed")}},
Active: true,
Params: url.Values{"target": []string{"example.com"}},
Reports: []*testReport{{scrapeStart, 100 * time.Millisecond, errors.New("failed")}},
Active: true,
},
{
Identifier: "blackbox",
Labels: labels.EmptyLabels(),
DiscoveredLabels: labels.FromMap(map[string]string{
targetLabels: model.LabelSet{
model.SchemeLabel: "http",
model.AddressLabel: "http://dropped.example.com:9115",
model.MetricsPathLabel: "/probe",
model.JobLabel: "blackbox",
model.ScrapeIntervalLabel: "30s",
model.ScrapeTimeoutLabel: "15s",
}),
},
Params: url.Values{},
Active: false,
},
@ -1507,7 +1505,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
response: &TargetDiscovery{
ActiveTargets: []*Target{
{
DiscoveredLabels: labels.FromStrings(),
DiscoveredLabels: labels.FromStrings("__param_target", "example.com", "__scrape_interval__", "0s", "__scrape_timeout__", "0s"),
Labels: labels.FromStrings("job", "blackbox"),
ScrapePool: "blackbox",
ScrapeURL: "http://localhost:9115/probe?target=example.com",
@ -1520,7 +1518,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
ScrapeTimeout: "10s",
},
{
DiscoveredLabels: labels.FromStrings(),
DiscoveredLabels: labels.FromStrings("__scrape_interval__", "0s", "__scrape_timeout__", "0s"),
Labels: labels.FromStrings("job", "test"),
ScrapePool: "test",
ScrapeURL: "http://example.com:8080/metrics",
@ -1556,7 +1554,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
response: &TargetDiscovery{
ActiveTargets: []*Target{
{
DiscoveredLabels: labels.FromStrings(),
DiscoveredLabels: labels.FromStrings("__param_target", "example.com", "__scrape_interval__", "0s", "__scrape_timeout__", "0s"),
Labels: labels.FromStrings("job", "blackbox"),
ScrapePool: "blackbox",
ScrapeURL: "http://localhost:9115/probe?target=example.com",
@ -1569,7 +1567,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
ScrapeTimeout: "10s",
},
{
DiscoveredLabels: labels.FromStrings(),
DiscoveredLabels: labels.FromStrings("__scrape_interval__", "0s", "__scrape_timeout__", "0s"),
Labels: labels.FromStrings("job", "test"),
ScrapePool: "test",
ScrapeURL: "http://example.com:8080/metrics",
@ -1605,7 +1603,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
response: &TargetDiscovery{
ActiveTargets: []*Target{
{
DiscoveredLabels: labels.FromStrings(),
DiscoveredLabels: labels.FromStrings("__param_target", "example.com", "__scrape_interval__", "0s", "__scrape_timeout__", "0s"),
Labels: labels.FromStrings("job", "blackbox"),
ScrapePool: "blackbox",
ScrapeURL: "http://localhost:9115/probe?target=example.com",
@ -1618,7 +1616,7 @@ func testEndpoints(t *testing.T, api *API, tr *testTargetRetriever, es storage.E
ScrapeTimeout: "10s",
},
{
DiscoveredLabels: labels.FromStrings(),
DiscoveredLabels: labels.FromStrings("__scrape_interval__", "0s", "__scrape_timeout__", "0s"),
Labels: labels.FromStrings("job", "test"),
ScrapePool: "test",
ScrapeURL: "http://example.com:8080/metrics",

View file

@ -142,6 +142,7 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route
false,
config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2},
false,
false,
)
promRouter := route.New().WithPrefix("/api/v1")

View file

@ -1,7 +1,7 @@
{
"name": "@prometheus-io/mantine-ui",
"private": true,
"version": "0.300.0",
"version": "0.300.1",
"type": "module",
"scripts": {
"start": "vite",
@ -12,62 +12,62 @@
"test": "vitest"
},
"dependencies": {
"@codemirror/autocomplete": "^6.18.1",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1",
"@codemirror/autocomplete": "^6.18.3",
"@codemirror/language": "^6.10.6",
"@codemirror/lint": "^6.8.4",
"@codemirror/state": "^6.4.1",
"@codemirror/view": "^6.34.1",
"@floating-ui/dom": "^1.6.7",
"@lezer/common": "^1.2.1",
"@floating-ui/dom": "^1.6.12",
"@lezer/common": "^1.2.3",
"@lezer/highlight": "^1.2.1",
"@mantine/code-highlight": "^7.13.1",
"@mantine/code-highlight": "^7.15.0",
"@mantine/core": "^7.11.2",
"@mantine/dates": "^7.13.1",
"@mantine/dates": "^7.15.0",
"@mantine/hooks": "^7.11.2",
"@mantine/notifications": "^7.13.1",
"@mantine/notifications": "^7.15.0",
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.300.0",
"@reduxjs/toolkit": "^2.2.1",
"@tabler/icons-react": "^3.19.0",
"@tanstack/react-query": "^5.59.0",
"@testing-library/jest-dom": "^6.5.0",
"@testing-library/react": "^16.0.1",
"@types/lodash": "^4.17.9",
"@prometheus-io/codemirror-promql": "0.300.1",
"@reduxjs/toolkit": "^2.5.0",
"@tabler/icons-react": "^3.24.0",
"@tanstack/react-query": "^5.62.7",
"@testing-library/jest-dom": "^6.6.3",
"@testing-library/react": "^16.1.0",
"@types/lodash": "^4.17.13",
"@types/sanitize-html": "^2.13.0",
"@uiw/react-codemirror": "^4.23.3",
"@uiw/react-codemirror": "^4.23.6",
"clsx": "^2.1.1",
"dayjs": "^1.11.10",
"lodash": "^4.17.21",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-infinite-scroll-component": "^6.1.0",
"react-redux": "^9.1.2",
"react-router-dom": "^6.26.2",
"sanitize-html": "^2.13.0",
"uplot": "^1.6.30",
"react-redux": "^9.2.0",
"react-router-dom": "^7.0.2",
"sanitize-html": "^2.13.1",
"uplot": "^1.6.31",
"uplot-react": "^1.2.2",
"use-query-params": "^2.2.1"
},
"devDependencies": {
"@eslint/compat": "^1.1.1",
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.11.1",
"@eslint/compat": "^1.2.4",
"@eslint/eslintrc": "^3.2.0",
"@eslint/js": "^9.16.0",
"@types/react": "^18.3.5",
"@types/react-dom": "^18.3.0",
"@typescript-eslint/eslint-plugin": "^6.21.0",
"@typescript-eslint/parser": "^6.21.0",
"@vitejs/plugin-react": "^4.2.1",
"eslint": "^9.11.1",
"eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830",
"eslint-plugin-react-refresh": "^0.4.12",
"globals": "^15.10.0",
"@vitejs/plugin-react": "^4.3.4",
"eslint": "^9.16.0",
"eslint-plugin-react-hooks": "^5.1.0",
"eslint-plugin-react-refresh": "^0.4.16",
"globals": "^15.13.0",
"jsdom": "^25.0.1",
"postcss": "^8.4.47",
"postcss-preset-mantine": "^1.17.0",
"postcss-simple-vars": "^7.0.1",
"vite": "^5.4.8",
"vitest": "^2.1.1"
"vite": "^6.0.3",
"vitest": "^2.1.8"
}
}

View file

@ -1,4 +1,12 @@
import { Popover, ActionIcon, Fieldset, Checkbox, Stack } from "@mantine/core";
import {
Popover,
ActionIcon,
Fieldset,
Checkbox,
Stack,
Group,
NumberInput,
} from "@mantine/core";
import { IconSettings } from "@tabler/icons-react";
import { FC } from "react";
import { useAppDispatch } from "../state/hooks";
@ -13,6 +21,8 @@ const SettingsMenu: FC = () => {
enableSyntaxHighlighting,
enableLinter,
showAnnotations,
ruleGroupsPerPage,
alertGroupsPerPage,
} = useSettings();
const dispatch = useAppDispatch();
@ -29,82 +39,126 @@ const SettingsMenu: FC = () => {
</ActionIcon>
</Popover.Target>
<Popover.Dropdown>
<Stack>
<Fieldset p="md" legend="Global settings">
<Checkbox
checked={useLocalTime}
label="Use local time"
onChange={(event) =>
dispatch(
updateSettings({ useLocalTime: event.currentTarget.checked })
)
}
/>
</Fieldset>
<Group align="flex-start">
<Stack>
<Fieldset p="md" legend="Global settings">
<Checkbox
checked={useLocalTime}
label="Use local time"
onChange={(event) =>
dispatch(
updateSettings({
useLocalTime: event.currentTarget.checked,
})
)
}
/>
</Fieldset>
<Fieldset p="md" legend="Query page settings">
<Stack>
<Checkbox
checked={enableQueryHistory}
label="Enable query history"
onChange={(event) =>
dispatch(
updateSettings({
enableQueryHistory: event.currentTarget.checked,
})
)
}
/>
<Checkbox
checked={enableAutocomplete}
label="Enable autocomplete"
onChange={(event) =>
dispatch(
updateSettings({
enableAutocomplete: event.currentTarget.checked,
})
)
}
/>
<Checkbox
checked={enableSyntaxHighlighting}
label="Enable syntax highlighting"
onChange={(event) =>
dispatch(
updateSettings({
enableSyntaxHighlighting: event.currentTarget.checked,
})
)
}
/>
<Checkbox
checked={enableLinter}
label="Enable linter"
onChange={(event) =>
dispatch(
updateSettings({
enableLinter: event.currentTarget.checked,
})
)
}
/>
</Stack>
</Fieldset>
<Fieldset p="md" legend="Query page settings">
<Stack>
<Checkbox
checked={enableQueryHistory}
label="Enable query history"
onChange={(event) =>
dispatch(
updateSettings({
enableQueryHistory: event.currentTarget.checked,
})
)
}
/>
<Checkbox
checked={enableAutocomplete}
label="Enable autocomplete"
onChange={(event) =>
dispatch(
updateSettings({
enableAutocomplete: event.currentTarget.checked,
})
)
}
/>
<Checkbox
checked={enableSyntaxHighlighting}
label="Enable syntax highlighting"
onChange={(event) =>
dispatch(
updateSettings({
enableSyntaxHighlighting: event.currentTarget.checked,
})
)
}
/>
<Checkbox
checked={enableLinter}
label="Enable linter"
onChange={(event) =>
dispatch(
updateSettings({
enableLinter: event.currentTarget.checked,
})
)
}
/>
</Stack>
</Fieldset>
</Stack>
<Fieldset p="md" legend="Alerts page settings">
<Checkbox
checked={showAnnotations}
label="Show expanded annotations"
onChange={(event) =>
dispatch(
updateSettings({
showAnnotations: event.currentTarget.checked,
})
)
}
/>
</Fieldset>
</Stack>
<Stack>
<Fieldset p="md" legend="Alerts page settings">
<Checkbox
checked={showAnnotations}
label="Show expanded annotations"
onChange={(event) =>
dispatch(
updateSettings({
showAnnotations: event.currentTarget.checked,
})
)
}
/>
</Fieldset>
<Fieldset p="md" legend="Alerts page settings">
<NumberInput
min={1}
allowDecimal={false}
label="Alert groups per page"
value={alertGroupsPerPage}
onChange={(value) => {
if (typeof value !== "number") {
return;
}
dispatch(
updateSettings({
alertGroupsPerPage: value,
})
);
}}
/>
</Fieldset>
<Fieldset p="md" legend="Rules page settings">
<NumberInput
min={1}
allowDecimal={false}
label="Rule groups per page"
value={ruleGroupsPerPage}
onChange={(value) => {
if (typeof value !== "number") {
return;
}
dispatch(
updateSettings({
ruleGroupsPerPage: value,
})
);
}}
/>
</Fieldset>
</Stack>
</Group>
</Popover.Dropdown>
</Popover>
);

View file

@ -11,6 +11,7 @@ import {
Alert,
TextInput,
Anchor,
Pagination,
} from "@mantine/core";
import { useSuspenseAPIQuery } from "../api/api";
import { AlertingRule, AlertingRulesResult } from "../api/responseTypes/rules";
@ -18,7 +19,7 @@ import badgeClasses from "../Badge.module.css";
import panelClasses from "../Panel.module.css";
import RuleDefinition from "../components/RuleDefinition";
import { humanizeDurationRelative, now } from "../lib/formatTime";
import { Fragment, useMemo } from "react";
import { Fragment, useEffect, useMemo } from "react";
import { StateMultiSelect } from "../components/StateMultiSelect";
import { IconInfoCircle, IconSearch } from "@tabler/icons-react";
import { LabelBadges } from "../components/LabelBadges";
@ -26,6 +27,7 @@ import { useSettings } from "../state/settingsSlice";
import {
ArrayParam,
BooleanParam,
NumberParam,
StringParam,
useQueryParam,
withDefault,
@ -33,6 +35,7 @@ import {
import { useDebouncedValue } from "@mantine/hooks";
import { KVSearch } from "@nexucis/kvsearch";
import { inputIconStyle } from "../styles";
import CustomInfiniteScroll from "../components/CustomInfiniteScroll";
type AlertsPageData = {
// How many rules are in each state across all groups.
@ -132,6 +135,12 @@ const buildAlertsPageData = (
return pageData;
};
// Defined as a module-level constant rather than an inline literal to
// avoid unnecessary re-renders: an inline empty array would get a new
// reference on each render, causing the memoized computations below to
// re-run whenever no state filter is selected.
const emptyStateFilter: string[] = [];
export default function AlertsPage() {
// Fetch the alerting rules data.
const { data } = useSuspenseAPIQuery<AlertingRulesResult>({
@ -146,7 +155,7 @@ export default function AlertsPage() {
// Define URL query params.
const [stateFilter, setStateFilter] = useQueryParam(
"state",
withDefault(ArrayParam, [])
withDefault(ArrayParam, emptyStateFilter)
);
const [searchFilter, setSearchFilter] = useQueryParam(
"search",
@ -158,132 +167,117 @@ export default function AlertsPage() {
withDefault(BooleanParam, true)
);
const { alertGroupsPerPage } = useSettings();
const [activePage, setActivePage] = useQueryParam(
"page",
withDefault(NumberParam, 1)
);
// Update the page data whenever the fetched data or filters change.
const alertsPageData: AlertsPageData = useMemo(
() => buildAlertsPageData(data.data, debouncedSearch, stateFilter),
[data, stateFilter, debouncedSearch]
);
const shownGroups = showEmptyGroups
? alertsPageData.groups
: alertsPageData.groups.filter((g) => g.rules.length > 0);
const shownGroups = useMemo(
() =>
showEmptyGroups
? alertsPageData.groups
: alertsPageData.groups.filter((g) => g.rules.length > 0),
[alertsPageData.groups, showEmptyGroups]
);
return (
<Stack mt="xs">
<Group>
<StateMultiSelect
options={["inactive", "pending", "firing"]}
optionClass={(o) =>
o === "inactive"
? badgeClasses.healthOk
: o === "pending"
? badgeClasses.healthWarn
: badgeClasses.healthErr
}
optionCount={(o) =>
alertsPageData.globalCounts[
o as keyof typeof alertsPageData.globalCounts
]
}
placeholder="Filter by rule state"
values={(stateFilter?.filter((v) => v !== null) as string[]) || []}
onChange={(values) => setStateFilter(values)}
/>
<TextInput
flex={1}
leftSection={<IconSearch style={inputIconStyle} />}
placeholder="Filter by rule name or labels"
value={searchFilter || ""}
onChange={(event) =>
setSearchFilter(event.currentTarget.value || null)
}
></TextInput>
</Group>
{alertsPageData.groups.length === 0 ? (
<Alert title="No rules found" icon={<IconInfoCircle />}>
No rules found.
</Alert>
) : (
!showEmptyGroups &&
alertsPageData.groups.length !== shownGroups.length && (
<Alert
title="Hiding groups with no matching rules"
icon={<IconInfoCircle/>}
>
Hiding {alertsPageData.groups.length - shownGroups.length} empty
groups due to filters or no rules.
<Anchor ml="md" fz="1em" onClick={() => setShowEmptyGroups(true)}>
Show empty groups
</Anchor>
</Alert>
)
)}
<Stack>
{shownGroups.map((g, i) => {
return (
<Card
shadow="xs"
withBorder
p="md"
key={i} // TODO: Find a stable and definitely unique key.
>
<Group mb="md" mt="xs" ml="xs" justify="space-between">
<Group align="baseline">
<Text
fz="xl"
fw={600}
c="var(--mantine-primary-color-filled)"
>
{g.name}
</Text>
<Text fz="sm" c="gray.6">
{g.file}
</Text>
</Group>
<Group>
{g.counts.firing > 0 && (
<Badge className={badgeClasses.healthErr}>
firing ({g.counts.firing})
</Badge>
)}
{g.counts.pending > 0 && (
<Badge className={badgeClasses.healthWarn}>
pending ({g.counts.pending})
</Badge>
)}
{g.counts.inactive > 0 && (
<Badge className={badgeClasses.healthOk}>
inactive ({g.counts.inactive})
</Badge>
)}
</Group>
</Group>
{g.counts.total === 0 ? (
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in this group.
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyGroups(false)}
>
Hide empty groups
</Anchor>
</Alert>
) : g.rules.length === 0 ? (
<Alert title="No matching rules" icon={<IconInfoCircle />}>
No rules in this group match your filter criteria (omitted{" "}
{g.counts.total} filtered rules).
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyGroups(false)}
>
Hide empty groups
</Anchor>
</Alert>
) : (
// If we were e.g. on page 10 and the number of total pages decreases to 5 (due to filtering
// or changing the max number of items per page), go to the largest possible page.
const totalPageCount = Math.ceil(shownGroups.length / alertGroupsPerPage);
const effectiveActivePage = Math.max(1, Math.min(activePage, totalPageCount));
useEffect(() => {
if (effectiveActivePage !== activePage) {
setActivePage(effectiveActivePage);
}
}, [effectiveActivePage, activePage, setActivePage]);
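The clamping rule above (and its twin on the rules page) reduces to max(1, min(activePage, ceil(items/perPage))). A Go transliteration, purely illustrative since the actual code is TypeScript:

package main

import "fmt"

// clampPage mirrors the page clamping used by the Alerts/Rules pages: when
// filtering or a per-page change shrinks the page count below the current
// page, fall back to the last valid page, and never go below page 1.
func clampPage(active, items, perPage int) int {
	total := (items + perPage - 1) / perPage // integer ceil
	if active > total {
		active = total
	}
	if active < 1 {
		active = 1
	}
	return active
}

func main() {
	fmt.Println(clampPage(10, 42, 10)) // 5: page count shrank under us
	fmt.Println(clampPage(3, 0, 10))   // 1: no items at all
}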
const currentPageGroups = useMemo(
() =>
shownGroups.slice(
(effectiveActivePage - 1) * alertGroupsPerPage,
effectiveActivePage * alertGroupsPerPage
),
[shownGroups, effectiveActivePage, alertGroupsPerPage]
);
// We memoize the actual rendering of the page items to avoid re-rendering
// them on every state change. This is especially important when the user
// types into the search box, as the search filter changes on every keystroke,
// even before debouncing takes place (extracting the filters and results list
// into separate components would be an alternative to this, but it's kinda
// convenient to have in the same file IMO).
const renderedPageItems = useMemo(
() =>
currentPageGroups.map((g, i) => (
<Card
shadow="xs"
withBorder
p="md"
key={i} // TODO: Find a stable and definitely unique key.
>
<Group mb="md" mt="xs" ml="xs" justify="space-between">
<Group align="baseline">
<Text fz="xl" fw={600} c="var(--mantine-primary-color-filled)">
{g.name}
</Text>
<Text fz="sm" c="gray.6">
{g.file}
</Text>
</Group>
<Group>
{g.counts.firing > 0 && (
<Badge className={badgeClasses.healthErr}>
firing ({g.counts.firing})
</Badge>
)}
{g.counts.pending > 0 && (
<Badge className={badgeClasses.healthWarn}>
pending ({g.counts.pending})
</Badge>
)}
{g.counts.inactive > 0 && (
<Badge className={badgeClasses.healthOk}>
inactive ({g.counts.inactive})
</Badge>
)}
</Group>
</Group>
{g.counts.total === 0 ? (
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in this group.
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyGroups(false)}
>
Hide empty groups
</Anchor>
</Alert>
) : g.rules.length === 0 ? (
<Alert title="No matching rules" icon={<IconInfoCircle />}>
No rules in this group match your filter criteria (omitted{" "}
{g.counts.total} filtered rules).
<Anchor
ml="md"
fz="1em"
onClick={() => setShowEmptyGroups(false)}
>
Hide empty groups
</Anchor>
</Alert>
) : (
<CustomInfiniteScroll
allItems={g.rules}
child={({ items }) => (
<Accordion multiple variant="separated">
{g.rules.map((r, j) => {
{items.map((r, j) => {
return (
<Accordion.Item
styles={{
@ -327,7 +321,7 @@ export default function AlertsPage() {
{r.rule.alerts.length > 0 && (
<Table mt="lg">
<Table.Thead>
<Table.Tr style={{whiteSpace: "nowrap"}}>
<Table.Tr style={{ whiteSpace: "nowrap" }}>
<Table.Th>Alert labels</Table.Th>
<Table.Th>State</Table.Th>
<Table.Th>Active Since</Table.Th>
@ -405,9 +399,71 @@ export default function AlertsPage() {
})}
</Accordion>
)}
</Card>
);
})}
/>
)}
</Card>
)),
[currentPageGroups, showAnnotations, setShowEmptyGroups]
);
return (
<Stack mt="xs">
<Group>
<StateMultiSelect
options={["inactive", "pending", "firing"]}
optionClass={(o) =>
o === "inactive"
? badgeClasses.healthOk
: o === "pending"
? badgeClasses.healthWarn
: badgeClasses.healthErr
}
optionCount={(o) =>
alertsPageData.globalCounts[
o as keyof typeof alertsPageData.globalCounts
]
}
placeholder="Filter by rule state"
values={(stateFilter?.filter((v) => v !== null) as string[]) || []}
onChange={(values) => setStateFilter(values)}
/>
<TextInput
flex={1}
leftSection={<IconSearch style={inputIconStyle} />}
placeholder="Filter by rule name or labels"
value={searchFilter || ""}
onChange={(event) =>
setSearchFilter(event.currentTarget.value || null)
}
></TextInput>
</Group>
{alertsPageData.groups.length === 0 ? (
<Alert title="No rules found" icon={<IconInfoCircle />}>
No rules found.
</Alert>
) : (
!showEmptyGroups &&
alertsPageData.groups.length !== shownGroups.length && (
<Alert
title="Hiding groups with no matching rules"
icon={<IconInfoCircle />}
>
Hiding {alertsPageData.groups.length - shownGroups.length} empty
groups due to filters or no rules.
<Anchor ml="md" fz="1em" onClick={() => setShowEmptyGroups(true)}>
Show empty groups
</Anchor>
</Alert>
)
)}
<Stack>
<Pagination
total={totalPageCount}
value={effectiveActivePage}
onChange={setActivePage}
hideWithOnePage
/>
{renderedPageItems}
</Stack>
</Stack>
);

View file

@ -4,6 +4,7 @@ import {
Badge,
Card,
Group,
Pagination,
rem,
Stack,
Text,
@ -29,6 +30,10 @@ import { RulesResult } from "../api/responseTypes/rules";
import badgeClasses from "../Badge.module.css";
import RuleDefinition from "../components/RuleDefinition";
import { badgeIconStyle } from "../styles";
import { NumberParam, useQueryParam, withDefault } from "use-query-params";
import { useSettings } from "../state/settingsSlice";
import { useEffect } from "react";
import CustomInfiniteScroll from "../components/CustomInfiniteScroll";
const healthBadgeClass = (state: string) => {
switch (state) {
@ -45,6 +50,23 @@ const healthBadgeClass = (state: string) => {
export default function RulesPage() {
const { data } = useSuspenseAPIQuery<RulesResult>({ path: `/rules` });
const { ruleGroupsPerPage } = useSettings();
const [activePage, setActivePage] = useQueryParam(
"page",
withDefault(NumberParam, 1)
);
// If we were e.g. on page 10 and the number of total pages decreases to 5 (due to
// changing the max number of items per page), go to the largest possible page.
const totalPageCount = Math.ceil(data.data.groups.length / ruleGroupsPerPage);
const effectiveActivePage = Math.max(1, Math.min(activePage, totalPageCount));
useEffect(() => {
if (effectiveActivePage !== activePage) {
setActivePage(effectiveActivePage);
}
}, [effectiveActivePage, activePage, setActivePage]);
return (
<Stack mt="xs">
@ -53,157 +75,178 @@ export default function RulesPage() {
No rule groups configured.
</Alert>
)}
{data.data.groups.map((g, i) => (
<Card
shadow="xs"
withBorder
p="md"
mb="md"
key={i} // TODO: Find a stable and definitely unique key.
>
<Group mb="md" mt="xs" ml="xs" justify="space-between">
<Group align="baseline">
<Text fz="xl" fw={600} c="var(--mantine-primary-color-filled)">
{g.name}
</Text>
<Text fz="sm" c="gray.6">
{g.file}
</Text>
<Pagination
total={totalPageCount}
value={effectiveActivePage}
onChange={setActivePage}
hideWithOnePage
/>
{data.data.groups
.slice(
(effectiveActivePage - 1) * ruleGroupsPerPage,
effectiveActivePage * ruleGroupsPerPage
)
.map((g, i) => (
<Card
shadow="xs"
withBorder
p="md"
mb="md"
key={i} // TODO: Find a stable and definitely unique key.
>
<Group mb="md" mt="xs" ml="xs" justify="space-between">
<Group align="baseline">
<Text fz="xl" fw={600} c="var(--mantine-primary-color-filled)">
{g.name}
</Text>
<Text fz="sm" c="gray.6">
{g.file}
</Text>
</Group>
<Group>
<Tooltip label="Last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
last run {humanizeDurationRelative(g.lastEvaluation, now())}
</Badge>
</Tooltip>
<Tooltip label="Duration of last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconHourglass style={badgeIconStyle} />}
>
took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)}
</Badge>
</Tooltip>
<Tooltip label="Group evaluation interval" withArrow>
<Badge
variant="transparent"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRepeat style={badgeIconStyle} />}
>
every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "}
</Badge>
</Tooltip>
</Group>
</Group>
<Group>
<Tooltip label="Last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
last run {humanizeDurationRelative(g.lastEvaluation, now())}
</Badge>
</Tooltip>
<Tooltip label="Duration of last group evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconHourglass style={badgeIconStyle} />}
>
took {humanizeDuration(parseFloat(g.evaluationTime) * 1000)}
</Badge>
</Tooltip>
<Tooltip label="Group evaluation interval" withArrow>
<Badge
variant="transparent"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRepeat style={badgeIconStyle} />}
>
every {humanizeDuration(parseFloat(g.interval) * 1000)}{" "}
</Badge>
</Tooltip>
</Group>
</Group>
{g.rules.length === 0 && (
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in rule group.
</Alert>
)}
<Accordion multiple variant="separated">
{g.rules.map((r, j) => (
<Accordion.Item
styles={{
item: {
// TODO: This transparency hack is an OK workaround to make the collapsed items
// have a different background color than their surrounding group card in dark mode,
// but it would be better to use CSS to override the light/dark colors for
// collapsed/expanded accordion items.
backgroundColor: "#c0c0c015",
},
}}
key={j}
value={j.toString()}
style={{
borderLeft:
r.health === "err"
? "5px solid var(--mantine-color-red-4)"
: r.health === "unknown"
? "5px solid var(--mantine-color-gray-5)"
: "5px solid var(--mantine-color-green-4)",
}}
>
<Accordion.Control>
<Group justify="space-between" mr="lg">
<Group gap="xs" wrap="nowrap">
{r.type === "alerting" ? (
<Tooltip label="Alerting rule" withArrow>
<IconBell
style={{ width: rem(15), height: rem(15) }}
/>
</Tooltip>
) : (
<Tooltip label="Recording rule" withArrow>
<IconTimeline
style={{ width: rem(15), height: rem(15) }}
/>
</Tooltip>
)}
<Text>{r.name}</Text>
</Group>
<Group gap="xs">
<Group gap="xs" wrap="wrap">
<Tooltip label="Last rule evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={<IconRefresh style={badgeIconStyle} />}
>
{humanizeDurationRelative(r.lastEvaluation, now())}
</Badge>
</Tooltip>
<Tooltip
label="Duration of last rule evaluation"
withArrow
>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={
<IconHourglass style={badgeIconStyle} />
}
>
{humanizeDuration(
parseFloat(r.evaluationTime) * 1000
)}
</Badge>
</Tooltip>
</Group>
<Badge className={healthBadgeClass(r.health)}>
{r.health}
</Badge>
</Group>
</Group>
</Accordion.Control>
<Accordion.Panel>
<RuleDefinition rule={r} />
{r.lastError && (
<Alert
color="red"
mt="sm"
title="Rule failed to evaluate"
icon={<IconAlertTriangle />}
{g.rules.length === 0 && (
<Alert title="No rules" icon={<IconInfoCircle />}>
No rules in rule group.
</Alert>
)}
<CustomInfiniteScroll
allItems={g.rules}
child={({ items }) => (
<Accordion multiple variant="separated">
{items.map((r, j) => (
<Accordion.Item
styles={{
item: {
// TODO: This transparency hack is an OK workaround to make the collapsed items
// have a different background color than their surrounding group card in dark mode,
// but it would be better to use CSS to override the light/dark colors for
// collapsed/expanded accordion items.
backgroundColor: "#c0c0c015",
},
}}
key={j}
value={j.toString()}
style={{
borderLeft:
r.health === "err"
? "5px solid var(--mantine-color-red-4)"
: r.health === "unknown"
? "5px solid var(--mantine-color-gray-5)"
: "5px solid var(--mantine-color-green-4)",
}}
>
<strong>Error:</strong> {r.lastError}
</Alert>
)}
</Accordion.Panel>
</Accordion.Item>
))}
</Accordion>
</Card>
))}
<Accordion.Control>
<Group justify="space-between" mr="lg">
<Group gap="xs" wrap="nowrap">
{r.type === "alerting" ? (
<Tooltip label="Alerting rule" withArrow>
<IconBell
style={{ width: rem(15), height: rem(15) }}
/>
</Tooltip>
) : (
<Tooltip label="Recording rule" withArrow>
<IconTimeline
style={{ width: rem(15), height: rem(15) }}
/>
</Tooltip>
)}
<Text>{r.name}</Text>
</Group>
<Group gap="xs">
<Group gap="xs" wrap="wrap">
<Tooltip label="Last rule evaluation" withArrow>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={
<IconRefresh style={badgeIconStyle} />
}
>
{humanizeDurationRelative(
r.lastEvaluation,
now()
)}
</Badge>
</Tooltip>
<Tooltip
label="Duration of last rule evaluation"
withArrow
>
<Badge
variant="light"
className={badgeClasses.statsBadge}
styles={{ label: { textTransform: "none" } }}
leftSection={
<IconHourglass style={badgeIconStyle} />
}
>
{humanizeDuration(
parseFloat(r.evaluationTime) * 1000
)}
</Badge>
</Tooltip>
</Group>
<Badge className={healthBadgeClass(r.health)}>
{r.health}
</Badge>
</Group>
</Group>
</Accordion.Control>
<Accordion.Panel>
<RuleDefinition rule={r} />
{r.lastError && (
<Alert
color="red"
mt="sm"
title="Rule failed to evaluate"
icon={<IconAlertTriangle />}
>
<strong>Error:</strong> {r.lastError}
</Alert>
)}
</Accordion.Panel>
</Accordion.Item>
))}
</Accordion>
)}
/>
</Card>
))}
</Stack>
);
}

View file

@ -1,6 +1,6 @@
module github.com/prometheus/prometheus/web/ui/mantine-ui/src/promql/tools
go 1.22.0
go 1.22.7
require (
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc

View file

@ -63,6 +63,7 @@ startAppListening({
case "enableSyntaxHighlighting":
case "enableLinter":
case "showAnnotations":
case "ruleGroupsPerPage":
return persistToLocalStorage(`settings.${key}`, value);
}
});

View file

@ -14,6 +14,8 @@ interface Settings {
enableSyntaxHighlighting: boolean;
enableLinter: boolean;
showAnnotations: boolean;
ruleGroupsPerPage: number;
alertGroupsPerPage: number;
}
// Declared/defined in public/index.html, value replaced by Prometheus when serving bundle.
@ -29,6 +31,8 @@ export const localStorageKeyEnableSyntaxHighlighting =
"settings.enableSyntaxHighlighting";
export const localStorageKeyEnableLinter = "settings.enableLinter";
export const localStorageKeyShowAnnotations = "settings.showAnnotations";
export const localStorageKeyRuleGroupsPerPage = "settings.ruleGroupsPerPage";
export const localStorageKeyAlertGroupsPerPage = "settings.alertGroupsPerPage";
// This dynamically/generically determines the pathPrefix by stripping the first known
// endpoint suffix from the window location path. It works out of the box for both direct
@ -95,6 +99,14 @@ export const initialState: Settings = {
localStorageKeyShowAnnotations,
true
),
ruleGroupsPerPage: initializeFromLocalStorage<number>(
localStorageKeyRuleGroupsPerPage,
10
),
alertGroupsPerPage: initializeFromLocalStorage<number>(
localStorageKeyAlertGroupsPerPage,
10
),
};
export const settingsSlice = createSlice({

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/codemirror-promql",
"version": "0.300.0",
"version": "0.300.1",
"description": "a CodeMirror mode for the PromQL language",
"types": "dist/esm/index.d.ts",
"module": "dist/esm/index.js",
@ -29,21 +29,21 @@
},
"homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
"dependencies": {
"@prometheus-io/lezer-promql": "0.300.0",
"lru-cache": "^11.0.1"
"@prometheus-io/lezer-promql": "0.300.1",
"lru-cache": "^11.0.2"
},
"devDependencies": {
"@codemirror/autocomplete": "^6.18.1",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1",
"@codemirror/autocomplete": "^6.18.3",
"@codemirror/language": "^6.10.6",
"@codemirror/lint": "^6.8.4",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.34.1",
"@lezer/common": "^1.2.1",
"@lezer/common": "^1.2.3",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.2",
"eslint-plugin-prettier": "^5.1.3",
"isomorphic-fetch": "^3.0.0",
"nock": "^13.5.4"
"nock": "^13.5.6"
},
"peerDependencies": {
"@codemirror/autocomplete": "^6.4.0",

View file

@ -1,6 +1,6 @@
{
"name": "@prometheus-io/lezer-promql",
"version": "0.300.0",
"version": "0.300.1",
"description": "lezer-based PromQL grammar",
"main": "dist/index.cjs",
"type": "module",
@ -31,10 +31,10 @@
"test": "NODE_OPTIONS=--experimental-vm-modules jest"
},
"devDependencies": {
"@lezer/generator": "^1.7.1",
"@lezer/generator": "^1.7.2",
"@lezer/highlight": "^1.2.0",
"@lezer/lr": "^1.4.2",
"@rollup/plugin-node-resolve": "^15.2.3"
"@rollup/plugin-node-resolve": "^15.3.0"
},
"peerDependencies": {
"@lezer/highlight": "^1.1.2",

web/ui/package-lock.json (generated): 2780 lines changed

File diff suppressed because it is too large

View file

@ -1,7 +1,7 @@
{
"name": "prometheus-io",
"description": "Monorepo for the Prometheus UI",
"version": "0.300.0",
"version": "0.300.1",
"private": true,
"scripts": {
"build": "bash build_ui.sh --all",
@ -15,13 +15,13 @@
"module/*"
],
"devDependencies": {
"@types/jest": "^29.5.13",
"@types/jest": "^29.5.14",
"@typescript-eslint/eslint-plugin": "^6.21.0",
"@typescript-eslint/parser": "^6.21.0",
"eslint-config-prettier": "^9.1.0",
"prettier": "^3.3.3",
"prettier": "^3.4.2",
"ts-jest": "^29.2.2",
"typescript": "^5.6.2",
"vite": "^5.4.8"
"typescript": "^5.7.2",
"vite": "^6.0.3"
}
}

File diff suppressed because it is too large

View file

@ -1,33 +1,33 @@
{
"name": "@prometheus-io/app",
"version": "0.300.0",
"version": "0.300.1",
"private": true,
"dependencies": {
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
"@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1",
"@codemirror/search": "^6.5.6",
"@codemirror/autocomplete": "^6.18.3",
"@codemirror/commands": "^6.7.1",
"@codemirror/language": "^6.10.6",
"@codemirror/lint": "^6.8.4",
"@codemirror/search": "^6.5.8",
"@codemirror/state": "^6.3.3",
"@codemirror/view": "^6.29.1",
"@codemirror/view": "^6.35.3",
"@forevolve/bootstrap-dark": "^4.0.2",
"@fortawesome/fontawesome-svg-core": "6.5.2",
"@fortawesome/free-solid-svg-icons": "6.5.2",
"@fortawesome/react-fontawesome": "0.2.0",
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@fortawesome/react-fontawesome": "0.2.2",
"@lezer/common": "^1.2.3",
"@lezer/highlight": "^1.2.1",
"@lezer/lr": "^1.4.2",
"@nexucis/fuzzy": "^0.4.1",
"@nexucis/kvsearch": "^0.8.1",
"@prometheus-io/codemirror-promql": "0.55.0",
"@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.300.1",
"bootstrap": "^4.6.2",
"css.escape": "^1.5.1",
"downshift": "^9.0.6",
"http-proxy-middleware": "^3.0.0",
"downshift": "^9.0.8",
"http-proxy-middleware": "^3.0.3",
"jquery": "^3.7.1",
"jquery.flot.tooltip": "^0.9.0",
"moment": "^2.30.1",
"moment-timezone": "^0.5.45",
"moment-timezone": "^0.5.46",
"popper.js": "^1.14.3",
"react": "^17.0.2",
"react-copy-to-clipboard": "^5.1.0",
@ -37,8 +37,8 @@
"react-router-dom": "^5.3.4",
"react-test-renderer": "^17.0.2",
"reactstrap": "^8.10.1",
"sanitize-html": "^2.13.0",
"sass": "1.77.6",
"sanitize-html": "^2.13.1",
"sass": "1.82.0",
"tempusdominus-bootstrap-4": "^5.39.2",
"tempusdominus-core": "^5.19.3"
},
@ -65,17 +65,17 @@
"not op_mini all"
],
"devDependencies": {
"@testing-library/react-hooks": "^7.0.2",
"@testing-library/react-hooks": "^8.0.1",
"@types/enzyme": "^3.10.18",
"@types/flot": "0.0.36",
"@types/jest": "^29.5.12",
"@types/jquery": "^3.5.30",
"@types/node": "^20.14.9",
"@types/jest": "^29.5.14",
"@types/jquery": "^3.5.32",
"@types/node": "^22.10.2",
"@types/react": "^17.0.71",
"@types/react-copy-to-clipboard": "^5.0.7",
"@types/react-dom": "^17.0.25",
"@types/react-router-dom": "^5.3.3",
"@types/sanitize-html": "^2.11.0",
"@types/sanitize-html": "^2.13.0",
"@types/sinon": "^17.0.3",
"@wojtekmaj/enzyme-adapter-react-17": "^0.8.0",
"enzyme": "^3.11.0",
@ -88,8 +88,8 @@
"mutationobserver-shim": "^0.3.7",
"prettier": "^2.8.8",
"react-scripts": "^5.0.1",
"sinon": "^18.0.0",
"ts-jest": "^29.2.2"
"sinon": "^19.0.2",
"ts-jest": "^29.2.5"
},
"jest": {
"snapshotSerializers": [

View file

@ -290,6 +290,7 @@ type Options struct {
EnableRemoteWriteReceiver bool
EnableOTLPWriteReceiver bool
IsAgent bool
CTZeroIngestionEnabled bool
AppName string
AcceptRemoteWriteProtoMsgs []config.RemoteWriteProtoMsg
@ -386,6 +387,7 @@ func New(logger *slog.Logger, o *Options) *Handler {
o.EnableRemoteWriteReceiver,
o.AcceptRemoteWriteProtoMsgs,
o.EnableOTLPWriteReceiver,
o.CTZeroIngestionEnabled,
)
if o.RoutePrefix != "/" {