mirror of
https://github.com/prometheus/prometheus.git
synced 2025-03-05 20:59:13 -08:00
Merge branch 'main' of github.com:prometheus/prometheus into openstack-loadbalancer-discovery
Signed-off-by: Paulo Dias <paulodias.gm@gmail.com>
This commit is contained in:
commit
cddf729ca3
2
.github/workflows/buf-lint.yml
vendored
2
.github/workflows/buf-lint.yml
vendored
|
@ -13,7 +13,7 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2
|
||||
- uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
||||
|
|
2
.github/workflows/buf.yml
vendored
2
.github/workflows/buf.yml
vendored
|
@ -13,7 +13,7 @@ jobs:
|
|||
if: github.repository_owner == 'prometheus'
|
||||
steps:
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2
|
||||
- uses: bufbuild/buf-setup-action@1115d0acd3d2a120b30023fac52abc46807c8fd6 # v1.48.0
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
||||
|
|
2
.github/workflows/ci.yml
vendored
2
.github/workflows/ci.yml
vendored
|
@ -195,7 +195,7 @@ jobs:
|
|||
with:
|
||||
args: --verbose
|
||||
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
|
||||
version: v1.62.0
|
||||
version: v1.63.4
|
||||
fuzzing:
|
||||
uses: ./.github/workflows/fuzzing.yml
|
||||
if: github.event_name == 'pull_request'
|
||||
|
|
6
.github/workflows/codeql-analysis.yml
vendored
6
.github/workflows/codeql-analysis.yml
vendored
|
@ -27,12 +27,12 @@ jobs:
|
|||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7
|
||||
uses: github/codeql-action/init@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7
|
||||
uses: github/codeql-action/autobuild@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@babb554ede22fd5605947329c4d04d8e7a0b8155 # v3.27.7
|
||||
uses: github/codeql-action/analyze@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.0
|
||||
|
|
2
.github/workflows/fuzzing.yml
vendored
2
.github/workflows/fuzzing.yml
vendored
|
@ -21,7 +21,7 @@ jobs:
|
|||
fuzz-seconds: 600
|
||||
dry-run: false
|
||||
- name: Upload Crash
|
||||
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
|
||||
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
|
||||
if: failure() && steps.build.outcome == 'success'
|
||||
with:
|
||||
name: artifacts
|
||||
|
|
4
.github/workflows/scorecards.yml
vendored
4
.github/workflows/scorecards.yml
vendored
|
@ -37,7 +37,7 @@ jobs:
|
|||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3
|
||||
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # tag=v4.5.0
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
|
@ -45,6 +45,6 @@ jobs:
|
|||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@babb554ede22fd5605947329c4d04d8e7a0b8155 # tag=v3.27.7
|
||||
uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # tag=v3.28.0
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
|
|
@ -5,25 +5,28 @@ output:
|
|||
sort-results: true
|
||||
|
||||
linters:
|
||||
# Keep this list sorted alphabetically
|
||||
enable:
|
||||
- depguard
|
||||
- errorlint
|
||||
- exptostd
|
||||
- gocritic
|
||||
- godot
|
||||
- gofumpt
|
||||
- goimports
|
||||
- loggercheck
|
||||
- misspell
|
||||
- nilnesserr
|
||||
- nolintlint
|
||||
- perfsprint
|
||||
- predeclared
|
||||
- revive
|
||||
- sloglint
|
||||
- testifylint
|
||||
- unconvert
|
||||
- unused
|
||||
- usestdlibvars
|
||||
- whitespace
|
||||
- loggercheck
|
||||
- sloglint
|
||||
|
||||
issues:
|
||||
max-issues-per-linter: 0
|
||||
|
|
56
CHANGELOG.md
56
CHANGELOG.md
|
@ -2,9 +2,55 @@
|
|||
|
||||
## unreleased
|
||||
|
||||
* [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428
|
||||
* [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416
|
||||
* [BUGFIX] OTLP receiver: Allow colons in non-standard units. #15710
|
||||
* [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719
|
||||
|
||||
## 3.1.0 / 2025-01-02
|
||||
|
||||
* [SECURITY] upgrade golang.org/x/crypto to address reported CVE-2024-45337. #15691
|
||||
* [CHANGE] Notifier: Increment prometheus_notifications_errors_total by the number of affected alerts rather than per batch. #15428
|
||||
* [CHANGE] API: list rules field "groupNextToken:omitempty" renamed to "groupNextToken". #15400
|
||||
* [ENHANCEMENT] OTLP translate: keep identifying attributes in target_info. #15448
|
||||
* [ENHANCEMENT] Paginate rule groups, add infinite scroll to rules within groups. #15677
|
||||
* [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880
|
||||
* [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672
|
||||
* [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339
|
||||
* [ENHANCEMENT] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
|
||||
* [PERF] Optimize `l=~".+"` matcher. #15474, #15684
|
||||
* [PERF] TSDB: Cache all symbols for compaction. #15455
|
||||
* [PERF] TSDB: MemPostings: keep a map of label values slices. #15426
|
||||
* [PERF] Remote-Write: Remove interning hook. #15456
|
||||
* [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453
|
||||
* [PERF] TSDB: reduce memory allocations. #15465, #15427
|
||||
* [PERF] Storage: Implement limit in mergeGenericQuerier. #14489
|
||||
* [PERF] TSDB: Optimize inverse matching. #14144
|
||||
* [PERF] Regex: use stack memory for lowercase copy of string. #15210
|
||||
* [PERF] TSDB: When deleting from postings index, pause to unlock and let readers read. #15242
|
||||
* [BUGFIX] Main: Avoid possible segfault at exit. (#15724)
|
||||
* [BUGFIX] Rules: Do not run rules concurrently if uncertain about dependencies. #15560
|
||||
* [BUGFIX] PromQL: Adds test for `absent`, `absent_over_time` and `deriv` func with histograms. #15667
|
||||
* [BUGFIX] PromQL: Fix various bugs related to quoting UTF-8 characters. #15531
|
||||
* [BUGFIX] Scrape: fix nil panic after scrape loop reload. #15563
|
||||
* [BUGFIX] Remote-write: fix panic on repeated log message. #15562
|
||||
* [BUGFIX] Scrape: reload would ignore always_scrape_classic_histograms and convert_classic_histograms_to_nhcb configs. #15489
|
||||
* [BUGFIX] TSDB: fix data corruption in experimental native histograms. #15482
|
||||
* [BUGFIX] PromQL: Ignore histograms in all time related functions. #15479
|
||||
* [BUGFIX] OTLP receiver: Convert metric metadata. #15416
|
||||
* [BUGFIX] PromQL: Fix `resets` function for histograms. #15527
|
||||
* [BUGFIX] PromQL: Fix behaviour of `changes()` for mix of histograms and floats. #15469
|
||||
* [BUGFIX] PromQL: Fix behaviour of some aggregations with histograms. #15432
|
||||
* [BUGFIX] allow quoted exemplar keys in openmetrics text format. #15260
|
||||
* [BUGFIX] TSDB: fixes for rare conditions when loading write-behind-log (WBL). #15380
|
||||
* [BUGFIX] `round()` function did not remove `__name__` label. #15250
|
||||
* [BUGFIX] Promtool: analyze block shows metric name with 0 cardinality. #15438
|
||||
* [BUGFIX] PromQL: Fix `count_values` for histograms. #15422
|
||||
* [BUGFIX] PromQL: fix issues with comparison binary operations with `bool` modifier and native histograms. #15413
|
||||
* [BUGFIX] PromQL: fix incorrect "native histogram ignored in aggregation" annotations. #15414
|
||||
* [BUGFIX] PromQL: Corrects the behaviour of some operator and aggregators with Native Histograms. #15245
|
||||
* [BUGFIX] TSDB: Always return unknown hint for first sample in non-gauge histogram chunk. #15343
|
||||
* [BUGFIX] PromQL: Clamp functions: Ignore any points with native histograms. #15169
|
||||
* [BUGFIX] TSDB: Fix race on stale values in headAppender. #15322
|
||||
* [BUGFIX] UI: Fix selector / series formatting for empty metric names. #15340
|
||||
* [BUGFIX] OTLP receiver: Allow colons in non-standard units. #15710
|
||||
|
||||
## 3.0.1 / 2024-11-28
|
||||
|
||||
|
@ -37,14 +83,14 @@ This release includes new features such as a brand new UI and UTF-8 support enab
|
|||
* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
|
||||
* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
|
||||
* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
|
||||
* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705
|
||||
* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705, #15258
|
||||
* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
|
||||
* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
|
||||
* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
|
||||
* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
|
||||
* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
|
||||
* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384
|
||||
* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
|
||||
* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769, #15011
|
||||
* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
|
||||
* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196
|
||||
* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
|
||||
|
|
|
@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
|||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.62.0
|
||||
GOLANGCI_LINT_VERSION ?= v1.63.4
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||
|
|
|
@ -275,6 +275,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
case "old-ui":
|
||||
c.web.UseOldUI = true
|
||||
logger.Info("Serving previous version of the Prometheus web UI.")
|
||||
case "otlp-deltatocumulative":
|
||||
c.web.ConvertOTLPDelta = true
|
||||
logger.Info("Converting delta OTLP metrics to cumulative")
|
||||
default:
|
||||
logger.Warn("Unknown option for --enable-feature", "option", o)
|
||||
}
|
||||
|
@ -516,7 +519,7 @@ func main() {
|
|||
a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
|
||||
Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
|
||||
|
||||
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
|
||||
a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
|
||||
Default("").StringsVar(&cfg.featureList)
|
||||
|
||||
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
|
||||
|
@ -534,7 +537,7 @@ func main() {
|
|||
|
||||
_, err := a.Parse(os.Args[1:])
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err))
|
||||
fmt.Fprintf(os.Stderr, "Error parsing command line arguments: %s\n", err)
|
||||
a.Usage(os.Args[1:])
|
||||
os.Exit(2)
|
||||
}
|
||||
|
@ -548,7 +551,7 @@ func main() {
|
|||
notifs.AddNotification(notifications.StartingUp)
|
||||
|
||||
if err := cfg.setFeatureListOptions(logger); err != nil {
|
||||
fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err))
|
||||
fmt.Fprintf(os.Stderr, "Error parsing feature list: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -1742,7 +1745,7 @@ func (s *readyStorage) WALReplayStatus() (tsdb.WALReplayStatus, error) {
|
|||
}
|
||||
|
||||
// ErrNotReady is returned if the underlying scrape manager is not ready yet.
|
||||
var ErrNotReady = errors.New("Scrape manager not ready")
|
||||
var ErrNotReady = errors.New("scrape manager not ready")
|
||||
|
||||
// ReadyScrapeManager allows a scrape manager to be retrieved. Even if it's set at a later point in time.
|
||||
type readyScrapeManager struct {
|
||||
|
|
|
@ -36,7 +36,7 @@ import (
|
|||
"github.com/prometheus/client_golang/api"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil/promlint"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/prometheus/common/version"
|
||||
|
@ -45,7 +45,6 @@ import (
|
|||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
promconfig "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
|
@ -74,14 +73,19 @@ const (
|
|||
// Exit code 3 is used for "one or more lint issues detected".
|
||||
lintErrExitCode = 3
|
||||
|
||||
lintOptionAll = "all"
|
||||
lintOptionDuplicateRules = "duplicate-rules"
|
||||
lintOptionNone = "none"
|
||||
checkHealth = "/-/healthy"
|
||||
checkReadiness = "/-/ready"
|
||||
lintOptionAll = "all"
|
||||
lintOptionDuplicateRules = "duplicate-rules"
|
||||
lintOptionTooLongScrapeInterval = "too-long-scrape-interval"
|
||||
lintOptionNone = "none"
|
||||
checkHealth = "/-/healthy"
|
||||
checkReadiness = "/-/ready"
|
||||
)
|
||||
|
||||
var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
|
||||
var (
|
||||
lintRulesOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
|
||||
// Same as lintRulesOptions, but including scrape config linting options as well.
|
||||
lintConfigOptions = append(append([]string{}, lintRulesOptions...), lintOptionTooLongScrapeInterval)
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
|
@ -98,6 +102,10 @@ func main() {
|
|||
app.HelpFlag.Short('h')
|
||||
|
||||
checkCmd := app.Command("check", "Check the resources for validity.")
|
||||
checkLookbackDelta := checkCmd.Flag(
|
||||
"query.lookback-delta",
|
||||
"The server's maximum query lookback duration.",
|
||||
).Default("5m").Duration()
|
||||
|
||||
experimental := app.Flag("experimental", "Enable experimental commands.").Bool()
|
||||
|
||||
|
@ -114,11 +122,12 @@ func main() {
|
|||
checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool()
|
||||
checkConfigLint := checkConfigCmd.Flag(
|
||||
"lint",
|
||||
"Linting checks to apply to the rules specified in the config. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
|
||||
"Linting checks to apply to the rules/scrape configs specified in the config. Available options are: "+strings.Join(lintConfigOptions, ", ")+". Use --lint=none to disable linting",
|
||||
).Default(lintOptionDuplicateRules).String()
|
||||
checkConfigLintFatal := checkConfigCmd.Flag(
|
||||
"lint-fatal",
|
||||
"Make lint errors exit with exit code 3.").Default("false").Bool()
|
||||
checkConfigIgnoreUnknownFields := checkConfigCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
|
||||
|
||||
checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.")
|
||||
webConfigFiles := checkWebConfigCmd.Arg(
|
||||
|
@ -141,11 +150,12 @@ func main() {
|
|||
).ExistingFiles()
|
||||
checkRulesLint := checkRulesCmd.Flag(
|
||||
"lint",
|
||||
"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
|
||||
"Linting checks to apply. Available options are: "+strings.Join(lintRulesOptions, ", ")+". Use --lint=none to disable linting",
|
||||
).Default(lintOptionDuplicateRules).String()
|
||||
checkRulesLintFatal := checkRulesCmd.Flag(
|
||||
"lint-fatal",
|
||||
"Make lint errors exit with exit code 3.").Default("false").Bool()
|
||||
checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
|
||||
|
||||
checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
|
||||
checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
|
||||
|
@ -219,6 +229,7 @@ func main() {
|
|||
).Required().ExistingFiles()
|
||||
testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool()
|
||||
testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()
|
||||
testRulesIgnoreUnknownFields := testRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
|
||||
|
||||
defaultDBPath := "data/"
|
||||
tsdbCmd := app.Command("tsdb", "Run tsdb commands.")
|
||||
|
@ -312,12 +323,12 @@ func main() {
|
|||
kingpin.Fatalf("Cannot set base auth in the server URL and use a http.config.file at the same time")
|
||||
}
|
||||
var err error
|
||||
httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath)
|
||||
httpConfig, _, err := promconfig.LoadHTTPConfigFile(httpConfigFilePath)
|
||||
if err != nil {
|
||||
kingpin.Fatalf("Failed to load HTTP config file: %v", err)
|
||||
}
|
||||
|
||||
httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version))
|
||||
httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", promconfig.WithUserAgent("promtool/"+version.Version))
|
||||
if err != nil {
|
||||
kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err)
|
||||
}
|
||||
|
@ -340,7 +351,7 @@ func main() {
|
|||
os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
|
||||
|
||||
case checkConfigCmd.FullCommand():
|
||||
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
|
||||
os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.Duration(*checkLookbackDelta)), *configFiles...))
|
||||
|
||||
case checkServerHealthCmd.FullCommand():
|
||||
os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))
|
||||
|
@ -352,7 +363,7 @@ func main() {
|
|||
os.Exit(CheckWebConfig(*webConfigFiles...))
|
||||
|
||||
case checkRulesCmd.FullCommand():
|
||||
os.Exit(CheckRules(newLintConfig(*checkRulesLint, *checkRulesLintFatal), *ruleFiles...))
|
||||
os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields), *ruleFiles...))
|
||||
|
||||
case checkMetricsCmd.FullCommand():
|
||||
os.Exit(CheckMetrics(*checkMetricsExtended))
|
||||
|
@ -394,6 +405,7 @@ func main() {
|
|||
*testRulesRun,
|
||||
*testRulesDiff,
|
||||
*testRulesDebug,
|
||||
*testRulesIgnoreUnknownFields,
|
||||
*testRulesFiles...),
|
||||
)
|
||||
|
||||
|
@ -446,16 +458,18 @@ func checkExperimental(f bool) {
|
|||
|
||||
var errLint = errors.New("lint error")
|
||||
|
||||
type lintConfig struct {
|
||||
all bool
|
||||
duplicateRules bool
|
||||
fatal bool
|
||||
type rulesLintConfig struct {
|
||||
all bool
|
||||
duplicateRules bool
|
||||
fatal bool
|
||||
ignoreUnknownFields bool
|
||||
}
|
||||
|
||||
func newLintConfig(stringVal string, fatal bool) lintConfig {
|
||||
func newRulesLintConfig(stringVal string, fatal, ignoreUnknownFields bool) rulesLintConfig {
|
||||
items := strings.Split(stringVal, ",")
|
||||
ls := lintConfig{
|
||||
fatal: fatal,
|
||||
ls := rulesLintConfig{
|
||||
fatal: fatal,
|
||||
ignoreUnknownFields: ignoreUnknownFields,
|
||||
}
|
||||
for _, setting := range items {
|
||||
switch setting {
|
||||
|
@ -465,16 +479,57 @@ func newLintConfig(stringVal string, fatal bool) lintConfig {
|
|||
ls.duplicateRules = true
|
||||
case lintOptionNone:
|
||||
default:
|
||||
fmt.Printf("WARNING: unknown lint option %s\n", setting)
|
||||
fmt.Printf("WARNING: unknown lint option: %q\n", setting)
|
||||
}
|
||||
}
|
||||
return ls
|
||||
}
|
||||
|
||||
func (ls lintConfig) lintDuplicateRules() bool {
|
||||
func (ls rulesLintConfig) lintDuplicateRules() bool {
|
||||
return ls.all || ls.duplicateRules
|
||||
}
|
||||
|
||||
type configLintConfig struct {
|
||||
rulesLintConfig
|
||||
|
||||
lookbackDelta model.Duration
|
||||
}
|
||||
|
||||
func newConfigLintConfig(optionsStr string, fatal, ignoreUnknownFields bool, lookbackDelta model.Duration) configLintConfig {
|
||||
c := configLintConfig{
|
||||
rulesLintConfig: rulesLintConfig{
|
||||
fatal: fatal,
|
||||
},
|
||||
}
|
||||
|
||||
lintNone := false
|
||||
var rulesOptions []string
|
||||
for _, option := range strings.Split(optionsStr, ",") {
|
||||
switch option {
|
||||
case lintOptionAll, lintOptionTooLongScrapeInterval:
|
||||
c.lookbackDelta = lookbackDelta
|
||||
if option == lintOptionAll {
|
||||
rulesOptions = append(rulesOptions, lintOptionAll)
|
||||
}
|
||||
case lintOptionNone:
|
||||
lintNone = true
|
||||
default:
|
||||
rulesOptions = append(rulesOptions, option)
|
||||
}
|
||||
}
|
||||
|
||||
if lintNone {
|
||||
c.lookbackDelta = 0
|
||||
rulesOptions = nil
|
||||
}
|
||||
|
||||
if len(rulesOptions) > 0 {
|
||||
c.rulesLintConfig = newRulesLintConfig(strings.Join(rulesOptions, ","), fatal, ignoreUnknownFields)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// CheckServerStatus - healthy & ready.
|
||||
func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
|
||||
if serverURL.Scheme == "" {
|
||||
|
@ -513,12 +568,12 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
|
|||
}
|
||||
|
||||
// CheckConfig validates configuration files.
|
||||
func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int {
|
||||
func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, files ...string) int {
|
||||
failed := false
|
||||
hasErrors := false
|
||||
|
||||
for _, f := range files {
|
||||
ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly)
|
||||
ruleFiles, scrapeConfigs, err := checkConfig(agentMode, f, checkSyntaxOnly)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, " FAILED:", err)
|
||||
hasErrors = true
|
||||
|
@ -531,12 +586,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
|
|||
}
|
||||
fmt.Println()
|
||||
|
||||
rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings)
|
||||
if rulesFailed {
|
||||
failed = rulesFailed
|
||||
}
|
||||
if rulesHasErrors {
|
||||
hasErrors = rulesHasErrors
|
||||
if !checkSyntaxOnly {
|
||||
scrapeConfigsFailed := lintScrapeConfigs(scrapeConfigs, lintSettings)
|
||||
failed = failed || scrapeConfigsFailed
|
||||
rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig)
|
||||
failed = failed || rulesFailed
|
||||
hasErrors = hasErrors || rulesHaveErrors
|
||||
}
|
||||
}
|
||||
if failed && hasErrors {
|
||||
|
@ -575,12 +630,12 @@ func checkFileExists(fn string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
|
||||
func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, []*config.ScrapeConfig, error) {
|
||||
fmt.Println("Checking", filename)
|
||||
|
||||
cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var ruleFiles []string
|
||||
|
@ -588,15 +643,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
|
|||
for _, rf := range cfg.RuleFiles {
|
||||
rfs, err := filepath.Glob(rf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
// If an explicit file was given, error if it is not accessible.
|
||||
if !strings.Contains(rf, "*") {
|
||||
if len(rfs) == 0 {
|
||||
return nil, fmt.Errorf("%q does not point to an existing file", rf)
|
||||
return nil, nil, fmt.Errorf("%q does not point to an existing file", rf)
|
||||
}
|
||||
if err := checkFileExists(rfs[0]); err != nil {
|
||||
return nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err)
|
||||
return nil, nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err)
|
||||
}
|
||||
}
|
||||
ruleFiles = append(ruleFiles, rfs...)
|
||||
|
@ -610,26 +665,26 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
|
|||
var err error
|
||||
scfgs, err = cfg.GetScrapeConfigs()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading scrape configs: %w", err)
|
||||
return nil, nil, fmt.Errorf("error loading scrape configs: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, scfg := range scfgs {
|
||||
if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil {
|
||||
if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil {
|
||||
return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)
|
||||
return nil, nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, c := range scfg.ServiceDiscoveryConfigs {
|
||||
switch c := c.(type) {
|
||||
case *kubernetes.SDConfig:
|
||||
if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
case *file.SDConfig:
|
||||
if checkSyntaxOnly {
|
||||
|
@ -638,17 +693,17 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
|
|||
for _, file := range c.Files {
|
||||
files, err := filepath.Glob(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(files) != 0 {
|
||||
for _, f := range files {
|
||||
var targetGroups []*targetgroup.Group
|
||||
targetGroups, err = checkSDFile(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("checking SD file %q: %w", file, err)
|
||||
return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err)
|
||||
}
|
||||
if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
@ -657,7 +712,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
|
|||
}
|
||||
case discovery.StaticConfig:
|
||||
if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -674,18 +729,18 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
|
|||
for _, file := range c.Files {
|
||||
files, err := filepath.Glob(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if len(files) != 0 {
|
||||
for _, f := range files {
|
||||
var targetGroups []*targetgroup.Group
|
||||
targetGroups, err = checkSDFile(f)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("checking SD file %q: %w", file, err)
|
||||
return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err)
|
||||
}
|
||||
|
||||
if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
@ -694,15 +749,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
|
|||
}
|
||||
case discovery.StaticConfig:
|
||||
if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ruleFiles, nil
|
||||
return ruleFiles, scfgs, nil
|
||||
}
|
||||
|
||||
func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error {
|
||||
func checkTLSConfig(tlsConfig promconfig.TLSConfig, checkSyntaxOnly bool) error {
|
||||
if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 {
|
||||
return fmt.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile)
|
||||
}
|
||||
|
@ -761,7 +816,7 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
|
|||
}
|
||||
|
||||
// CheckRules validates rule files.
|
||||
func CheckRules(ls lintConfig, files ...string) int {
|
||||
func CheckRules(ls rulesLintConfig, files ...string) int {
|
||||
failed := false
|
||||
hasErrors := false
|
||||
if len(files) == 0 {
|
||||
|
@ -781,7 +836,7 @@ func CheckRules(ls lintConfig, files ...string) int {
|
|||
}
|
||||
|
||||
// checkRulesFromStdin validates rule from stdin.
|
||||
func checkRulesFromStdin(ls lintConfig) (bool, bool) {
|
||||
func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) {
|
||||
failed := false
|
||||
hasErrors := false
|
||||
fmt.Println("Checking standard input")
|
||||
|
@ -790,7 +845,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) {
|
|||
fmt.Fprintln(os.Stderr, " FAILED:", err)
|
||||
return true, true
|
||||
}
|
||||
rgs, errs := rulefmt.Parse(data)
|
||||
rgs, errs := rulefmt.Parse(data, ls.ignoreUnknownFields)
|
||||
if errs != nil {
|
||||
failed = true
|
||||
fmt.Fprintln(os.Stderr, " FAILED:")
|
||||
|
@ -819,12 +874,12 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) {
|
|||
}
|
||||
|
||||
// checkRules validates rule files.
|
||||
func checkRules(files []string, ls lintConfig) (bool, bool) {
|
||||
func checkRules(files []string, ls rulesLintConfig) (bool, bool) {
|
||||
failed := false
|
||||
hasErrors := false
|
||||
for _, f := range files {
|
||||
fmt.Println("Checking", f)
|
||||
rgs, errs := rulefmt.ParseFile(f)
|
||||
rgs, errs := rulefmt.ParseFile(f, ls.ignoreUnknownFields)
|
||||
if errs != nil {
|
||||
failed = true
|
||||
fmt.Fprintln(os.Stderr, " FAILED:")
|
||||
|
@ -853,7 +908,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) {
|
|||
return failed, hasErrors
|
||||
}
|
||||
|
||||
func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) {
|
||||
func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings rulesLintConfig) (int, []error) {
|
||||
numRules := 0
|
||||
for _, rg := range rgs.Groups {
|
||||
numRules += len(rg.Rules)
|
||||
|
@ -877,6 +932,16 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []e
|
|||
return numRules, nil
|
||||
}
|
||||
|
||||
func lintScrapeConfigs(scrapeConfigs []*config.ScrapeConfig, lintSettings configLintConfig) bool {
|
||||
for _, scfg := range scrapeConfigs {
|
||||
if lintSettings.lookbackDelta > 0 && scfg.ScrapeInterval >= lintSettings.lookbackDelta {
|
||||
fmt.Fprintf(os.Stderr, " FAILED: too long scrape interval found, data point will be marked as stale - job: %s, interval: %s\n", scfg.JobName, scfg.ScrapeInterval)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type compareRuleType struct {
|
||||
metric string
|
||||
label labels.Labels
|
||||
|
|
|
@ -185,7 +185,7 @@ func TestCheckDuplicates(t *testing.T) {
|
|||
c := test
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
rgs, err := rulefmt.ParseFile(c.ruleFile)
|
||||
rgs, err := rulefmt.ParseFile(c.ruleFile, false)
|
||||
require.Empty(t, err)
|
||||
dups := checkDuplicates(rgs.Groups)
|
||||
require.Equal(t, c.expectedDups, dups)
|
||||
|
@ -194,7 +194,7 @@ func TestCheckDuplicates(t *testing.T) {
|
|||
}
|
||||
|
||||
func BenchmarkCheckDuplicates(b *testing.B) {
|
||||
rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml")
|
||||
rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false)
|
||||
require.Empty(b, err)
|
||||
b.ResetTimer()
|
||||
|
||||
|
@ -234,7 +234,7 @@ func TestCheckTargetConfig(t *testing.T) {
|
|||
for _, test := range cases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, err := checkConfig(false, "testdata/"+test.file, false)
|
||||
_, _, err := checkConfig(false, "testdata/"+test.file, false)
|
||||
if test.err != "" {
|
||||
require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
|
||||
return
|
||||
|
@ -319,7 +319,7 @@ func TestCheckConfigSyntax(t *testing.T) {
|
|||
for _, test := range cases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
|
||||
_, _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
|
||||
expectedErrMsg := test.err
|
||||
if strings.Contains(runtime.GOOS, "windows") {
|
||||
expectedErrMsg = test.errWindows
|
||||
|
@ -355,7 +355,7 @@ func TestAuthorizationConfig(t *testing.T) {
|
|||
for _, test := range cases {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
_, err := checkConfig(false, "testdata/"+test.file, false)
|
||||
_, _, err := checkConfig(false, "testdata/"+test.file, false)
|
||||
if test.err != "" {
|
||||
require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
|
||||
return
|
||||
|
@ -508,7 +508,7 @@ func TestCheckRules(t *testing.T) {
|
|||
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
|
||||
os.Stdin = r
|
||||
|
||||
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
|
||||
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
|
||||
require.Equal(t, successExitCode, exitCode, "")
|
||||
})
|
||||
|
||||
|
@ -530,7 +530,7 @@ func TestCheckRules(t *testing.T) {
|
|||
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
|
||||
os.Stdin = r
|
||||
|
||||
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
|
||||
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
|
||||
require.Equal(t, failureExitCode, exitCode, "")
|
||||
})
|
||||
|
||||
|
@ -552,7 +552,7 @@ func TestCheckRules(t *testing.T) {
|
|||
defer func(v *os.File) { os.Stdin = v }(os.Stdin)
|
||||
os.Stdin = r
|
||||
|
||||
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true))
|
||||
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false))
|
||||
require.Equal(t, lintErrExitCode, exitCode, "")
|
||||
})
|
||||
}
|
||||
|
@ -560,23 +560,66 @@ func TestCheckRules(t *testing.T) {
|
|||
func TestCheckRulesWithRuleFiles(t *testing.T) {
|
||||
t.Run("rules-good", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
|
||||
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml")
|
||||
require.Equal(t, successExitCode, exitCode, "")
|
||||
})
|
||||
|
||||
t.Run("rules-bad", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
|
||||
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml")
|
||||
require.Equal(t, failureExitCode, exitCode, "")
|
||||
})
|
||||
|
||||
t.Run("rules-lint-fatal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
|
||||
exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml")
|
||||
require.Equal(t, lintErrExitCode, exitCode, "")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckScrapeConfigs(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
lookbackDelta model.Duration
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "scrape interval less than lookback delta",
|
||||
lookbackDelta: model.Duration(11 * time.Minute),
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "scrape interval greater than lookback delta",
|
||||
lookbackDelta: model.Duration(5 * time.Minute),
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "scrape interval same as lookback delta",
|
||||
lookbackDelta: model.Duration(10 * time.Minute),
|
||||
expectError: true,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Non-fatal linting.
|
||||
code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
require.Equal(t, successExitCode, code, "Non-fatal linting should return success")
|
||||
// Fatal linting.
|
||||
code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
if tc.expectError {
|
||||
require.Equal(t, lintErrExitCode, code, "Fatal linting should return error")
|
||||
} else {
|
||||
require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems")
|
||||
}
|
||||
// Check syntax only, no linting.
|
||||
code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only")
|
||||
// Lint option "none" should disable linting.
|
||||
code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTSDBDumpCommand(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
|
|
|
@ -69,7 +69,7 @@ func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient q
|
|||
|
||||
// loadGroups parses groups from a list of recording rule files.
|
||||
func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
|
||||
groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
|
||||
groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, false, filenames...)
|
||||
if errs != nil {
|
||||
return errs
|
||||
}
|
||||
|
|
3
cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml
vendored
Normal file
3
cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
scrape_configs:
|
||||
- job_name: too_long_scrape_interval_test
|
||||
scrape_interval: 10m
|
33
cmd/promtool/testdata/rules_extrafields.yml
vendored
Normal file
33
cmd/promtool/testdata/rules_extrafields.yml
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
# This is the rules file. It has an extra "ownership"
|
||||
# field in the second group. promtool should ignore this field
|
||||
# and not return an error with --ignore-unknown-fields.
|
||||
|
||||
groups:
|
||||
- name: alerts
|
||||
namespace: "foobar"
|
||||
rules:
|
||||
- alert: InstanceDown
|
||||
expr: up == 0
|
||||
for: 5m
|
||||
labels:
|
||||
severity: page
|
||||
annotations:
|
||||
summary: "Instance {{ $labels.instance }} down"
|
||||
description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
|
||||
- alert: AlwaysFiring
|
||||
expr: 1
|
||||
|
||||
- name: rules
|
||||
ownership:
|
||||
service: "test"
|
||||
rules:
|
||||
- record: job:test:count_over_time1m
|
||||
expr: sum without(instance) (count_over_time(test[1m]))
|
||||
|
||||
# A recording rule that doesn't depend on input series.
|
||||
- record: fixed_data
|
||||
expr: 1
|
||||
|
||||
# Subquery with default resolution test.
|
||||
- record: suquery_interval_test
|
||||
expr: count_over_time(up[5m:])
|
21
cmd/promtool/testdata/rules_run_extrafields.yml
vendored
Normal file
21
cmd/promtool/testdata/rules_run_extrafields.yml
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
# Minimal test case to see that --ignore-unknown-fields
|
||||
# is working as expected. It should not return an error
|
||||
# when any extra fields are present in the rules file.
|
||||
rule_files:
|
||||
- rules_extrafields.yml
|
||||
|
||||
evaluation_interval: 1m
|
||||
|
||||
|
||||
tests:
|
||||
- name: extra ownership field test
|
||||
input_series:
|
||||
- series: test
|
||||
values: 1
|
||||
|
||||
promql_expr_test:
|
||||
- expr: test
|
||||
eval_time: 0
|
||||
exp_samples:
|
||||
- value: 1
|
||||
labels: test
|
|
@ -46,11 +46,11 @@ import (
|
|||
|
||||
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
|
||||
// More info about the file format can be found in the docs.
|
||||
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
|
||||
return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...)
|
||||
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
|
||||
return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, ignoreUnknownFields, files...)
|
||||
}
|
||||
|
||||
func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
|
||||
func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
|
||||
failed := false
|
||||
junit := &junitxml.JUnitXML{}
|
||||
|
||||
|
@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
|
|||
}
|
||||
|
||||
for _, f := range files {
|
||||
if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil {
|
||||
if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, ignoreUnknownFields, junit.Suite(f)); errs != nil {
|
||||
fmt.Fprintln(os.Stderr, " FAILED:")
|
||||
for _, e := range errs {
|
||||
fmt.Fprintln(os.Stderr, e.Error())
|
||||
|
@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
|
|||
return successExitCode
|
||||
}
|
||||
|
||||
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error {
|
||||
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug, ignoreUnknownFields bool, ts *junitxml.TestSuite) []error {
|
||||
b, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
ts.Abort(err)
|
||||
|
@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
|
|||
if t.Interval == 0 {
|
||||
t.Interval = unitTestInp.EvaluationInterval
|
||||
}
|
||||
ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, unitTestInp.RuleFiles...)
|
||||
ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.RuleFiles...)
|
||||
if ers != nil {
|
||||
for _, e := range ers {
|
||||
tc.Fail(e.Error())
|
||||
|
@ -198,7 +198,7 @@ type testGroup struct {
|
|||
}
|
||||
|
||||
// test performs the unit tests.
|
||||
func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) {
|
||||
func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields bool, ruleFiles ...string) (outErr []error) {
|
||||
if debug {
|
||||
testStart := time.Now()
|
||||
fmt.Printf("DEBUG: Starting test %s\n", testname)
|
||||
|
@ -228,7 +228,7 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
|
|||
Logger: promslog.NewNopLogger(),
|
||||
}
|
||||
m := rules.NewManager(opts)
|
||||
groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)
|
||||
groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ignoreUnknownFields, ruleFiles...)
|
||||
if ers != nil {
|
||||
return ers
|
||||
}
|
||||
|
|
|
@ -143,7 +143,7 @@ func TestRulesUnitTest(t *testing.T) {
|
|||
}
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want {
|
||||
if got := RulesUnitTest(tt.queryOpts, nil, false, false, false, tt.args.files...); got != tt.want {
|
||||
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
|
@ -151,7 +151,7 @@ func TestRulesUnitTest(t *testing.T) {
|
|||
t.Run("Junit xml output ", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
var buf bytes.Buffer
|
||||
if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 {
|
||||
if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, false, reuseFiles...); got != 1 {
|
||||
t.Errorf("RulesUnitTestResults() = %v, want 1", got)
|
||||
}
|
||||
var test junitxml.JUnitXML
|
||||
|
@ -194,10 +194,11 @@ func TestRulesUnitTestRun(t *testing.T) {
|
|||
files []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
queryOpts promqltest.LazyLoaderOpts
|
||||
want int
|
||||
name string
|
||||
args args
|
||||
queryOpts promqltest.LazyLoaderOpts
|
||||
want int
|
||||
ignoreUnknownFields bool
|
||||
}{
|
||||
{
|
||||
name: "Test all without run arg",
|
||||
|
@ -231,11 +232,19 @@ func TestRulesUnitTestRun(t *testing.T) {
|
|||
},
|
||||
want: 1,
|
||||
},
|
||||
{
|
||||
name: "Test all with extra fields",
|
||||
args: args{
|
||||
files: []string{"./testdata/rules_run_extrafields.yml"},
|
||||
},
|
||||
ignoreUnknownFields: true,
|
||||
want: 0,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...)
|
||||
got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.ignoreUnknownFields, tt.args.files...)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -248,12 +249,7 @@ func (d *Discovery) shouldWatchFromName(name string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
for _, sn := range d.watchedServices {
|
||||
if sn == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.Contains(d.watchedServices, name)
|
||||
}
|
||||
|
||||
// shouldWatchFromTags returns whether the service of the given name should be watched based on its tags.
|
||||
|
|
|
@ -22,8 +22,9 @@ import (
|
|||
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
|
||||
"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
@ -72,8 +73,8 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou
|
|||
}
|
||||
|
||||
type floatingIPKey struct {
|
||||
id string
|
||||
fixed string
|
||||
deviceID string
|
||||
fixed string
|
||||
}
|
||||
|
||||
func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||
|
@ -90,9 +91,33 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
return nil, fmt.Errorf("could not create OpenStack compute session: %w", err)
|
||||
}
|
||||
|
||||
networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{
|
||||
Region: i.region, Availability: i.availability,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not create OpenStack network session: %w", err)
|
||||
}
|
||||
|
||||
// OpenStack API reference
|
||||
// https://developer.openstack.org/api-ref/compute/#list-floating-ips
|
||||
pagerFIP := floatingips.List(client)
|
||||
// https://docs.openstack.org/api-ref/network/v2/index.html#list-ports
|
||||
portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list all ports: %w", err)
|
||||
}
|
||||
|
||||
allPorts, err := ports.ExtractPorts(portPages)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to extract Ports: %w", err)
|
||||
}
|
||||
|
||||
portList := make(map[string]string)
|
||||
for _, port := range allPorts {
|
||||
portList[port.ID] = port.DeviceID
|
||||
}
|
||||
|
||||
// OpenStack API reference
|
||||
// https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ips
|
||||
pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{})
|
||||
floatingIPList := make(map[floatingIPKey]string)
|
||||
floatingIPPresent := make(map[string]struct{})
|
||||
err = pagerFIP.EachPage(func(page pagination.Page) (bool, error) {
|
||||
|
@ -102,11 +127,24 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
}
|
||||
for _, ip := range result {
|
||||
// Skip not associated ips
|
||||
if ip.InstanceID == "" || ip.FixedIP == "" {
|
||||
if ip.PortID == "" || ip.FixedIP == "" {
|
||||
continue
|
||||
}
|
||||
floatingIPList[floatingIPKey{id: ip.InstanceID, fixed: ip.FixedIP}] = ip.IP
|
||||
floatingIPPresent[ip.IP] = struct{}{}
|
||||
|
||||
// Fetch deviceID from portList
|
||||
deviceID, ok := portList[ip.PortID]
|
||||
if !ok {
|
||||
i.logger.Warn("Floating IP PortID not found in portList", "PortID", ip.PortID)
|
||||
continue
|
||||
}
|
||||
|
||||
key := floatingIPKey{
|
||||
deviceID: deviceID,
|
||||
fixed: ip.FixedIP,
|
||||
}
|
||||
|
||||
floatingIPList[key] = ip.FloatingIP
|
||||
floatingIPPresent[ip.FloatingIP] = struct{}{}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
@ -198,7 +236,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
}
|
||||
lbls[openstackLabelAddressPool] = model.LabelValue(pool)
|
||||
lbls[openstackLabelPrivateIP] = model.LabelValue(addr)
|
||||
if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
|
||||
if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok {
|
||||
lbls[openstackLabelPublicIP] = model.LabelValue(val)
|
||||
}
|
||||
addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
|
||||
|
|
|
@ -32,6 +32,7 @@ func (s *OpenstackSDInstanceTestSuite) SetupTest(t *testing.T) {
|
|||
|
||||
s.Mock.HandleServerListSuccessfully()
|
||||
s.Mock.HandleFloatingIPListSuccessfully()
|
||||
s.Mock.HandlePortsListSuccessfully()
|
||||
|
||||
s.Mock.HandleVersionsSuccessfully()
|
||||
s.Mock.HandleAuthSuccessfully()
|
||||
|
@ -66,7 +67,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
|
|||
tg := tgs[0]
|
||||
require.NotNil(t, tg)
|
||||
require.NotNil(t, tg.Targets)
|
||||
require.Len(t, tg.Targets, 4)
|
||||
require.Len(t, tg.Targets, 6)
|
||||
|
||||
for i, lbls := range []model.LabelSet{
|
||||
{
|
||||
|
@ -119,6 +120,31 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
|
|||
"__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"),
|
||||
"__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"),
|
||||
},
|
||||
{
|
||||
"__address__": model.LabelValue("10.0.0.33:0"),
|
||||
"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
|
||||
"__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"),
|
||||
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
|
||||
"__meta_openstack_instance_name": model.LabelValue("merp-project2"),
|
||||
"__meta_openstack_private_ip": model.LabelValue("10.0.0.33"),
|
||||
"__meta_openstack_address_pool": model.LabelValue("private"),
|
||||
"__meta_openstack_tag_env": model.LabelValue("prod"),
|
||||
"__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"),
|
||||
"__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"),
|
||||
},
|
||||
{
|
||||
"__address__": model.LabelValue("10.0.0.34:0"),
|
||||
"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
|
||||
"__meta_openstack_instance_id": model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"),
|
||||
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
|
||||
"__meta_openstack_instance_name": model.LabelValue("merp-project2"),
|
||||
"__meta_openstack_private_ip": model.LabelValue("10.0.0.34"),
|
||||
"__meta_openstack_address_pool": model.LabelValue("private"),
|
||||
"__meta_openstack_tag_env": model.LabelValue("prod"),
|
||||
"__meta_openstack_public_ip": model.LabelValue("10.10.10.24"),
|
||||
"__meta_openstack_project_id": model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"),
|
||||
"__meta_openstack_user_id": model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"),
|
||||
},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
|
||||
require.Equal(t, lbls, tg.Targets[i])
|
||||
|
|
|
@ -140,7 +140,7 @@ func (m *SDMock) HandleAuthSuccessfully() {
|
|||
{
|
||||
"endpoints": [
|
||||
{
|
||||
"id": "dc9a55e0bf84487a98671fbc74b68e68",
|
||||
"id": "5448e46679564d7d95466c2bef54c296",
|
||||
"interface": "public",
|
||||
"region": "RegionOne",
|
||||
"region_id": "RegionOne",
|
||||
|
@ -160,10 +160,9 @@ func (m *SDMock) HandleAuthSuccessfully() {
|
|||
"url": "%s"
|
||||
}
|
||||
],
|
||||
"id": "c609fc430175123490b62a4242e8a7e8",
|
||||
"id": "26968f704a68417bbddd29508455ff90",
|
||||
"type": "load-balancer"
|
||||
}
|
||||
|
||||
}
|
||||
],
|
||||
"expires_at": "2013-02-27T18:30:59.999999Z",
|
||||
"is_domain": false,
|
||||
|
@ -487,82 +486,159 @@ const serverListBody = `
|
|||
"metadata": {}
|
||||
},
|
||||
{
|
||||
"status": "ACTIVE",
|
||||
"updated": "2014-09-25T13:04:49Z",
|
||||
"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
|
||||
"OS-EXT-SRV-ATTR:host": "devstack",
|
||||
"addresses": {
|
||||
"private": [
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.0.0.33",
|
||||
"OS-EXT-IPS:type": "fixed"
|
||||
},
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.0.0.34",
|
||||
"OS-EXT-IPS:type": "fixed"
|
||||
},
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.10.10.4",
|
||||
"OS-EXT-IPS:type": "floating"
|
||||
}
|
||||
]
|
||||
},
|
||||
"links": [
|
||||
{
|
||||
"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
|
||||
"rel": "self"
|
||||
"status": "ACTIVE",
|
||||
"updated": "2014-09-25T13:04:49Z",
|
||||
"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
|
||||
"OS-EXT-SRV-ATTR:host": "devstack",
|
||||
"addresses": {
|
||||
"private": [
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.0.0.33",
|
||||
"OS-EXT-IPS:type": "fixed"
|
||||
},
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.0.0.34",
|
||||
"OS-EXT-IPS:type": "fixed"
|
||||
},
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.10.10.4",
|
||||
"OS-EXT-IPS:type": "floating"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
],
|
||||
"key_name": null,
|
||||
"image": "",
|
||||
"OS-EXT-STS:task_state": null,
|
||||
"OS-EXT-STS:vm_state": "active",
|
||||
"OS-EXT-SRV-ATTR:instance_name": "instance-0000001d",
|
||||
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
|
||||
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
|
||||
"flavor": {
|
||||
"vcpus": 2,
|
||||
"ram": 4096,
|
||||
"disk": 0,
|
||||
"ephemeral": 0,
|
||||
"swap": 0,
|
||||
"original_name": "m1.small",
|
||||
"extra_specs": {
|
||||
"aggregate_instance_extra_specs:general": "true",
|
||||
"hw:mem_page_size": "large",
|
||||
"hw:vif_multiqueue_enabled": "true"
|
||||
"links": [
|
||||
{
|
||||
"href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
|
||||
"rel": "self"
|
||||
},
|
||||
{
|
||||
"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
],
|
||||
"key_name": null,
|
||||
"image": "",
|
||||
"OS-EXT-STS:task_state": null,
|
||||
"OS-EXT-STS:vm_state": "active",
|
||||
"OS-EXT-SRV-ATTR:instance_name": "instance-0000001d",
|
||||
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
|
||||
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
|
||||
"flavor": {
|
||||
"vcpus": 2,
|
||||
"ram": 4096,
|
||||
"disk": 0,
|
||||
"ephemeral": 0,
|
||||
"swap": 0,
|
||||
"original_name": "m1.small",
|
||||
"extra_specs": {
|
||||
"aggregate_instance_extra_specs:general": "true",
|
||||
"hw:mem_page_size": "large",
|
||||
"hw:vif_multiqueue_enabled": "true"
|
||||
}
|
||||
},
|
||||
"id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
|
||||
"security_groups": [
|
||||
{
|
||||
"name": "default"
|
||||
}
|
||||
],
|
||||
"OS-SRV-USG:terminated_at": null,
|
||||
"OS-EXT-AZ:availability_zone": "nova",
|
||||
"user_id": "9349aff8be7545ac9d2f1d00999a23cd",
|
||||
"name": "merp",
|
||||
"created": "2014-09-25T13:04:41Z",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"OS-DCF:diskConfig": "MANUAL",
|
||||
"os-extended-volumes:volumes_attached": [],
|
||||
"accessIPv4": "",
|
||||
"accessIPv6": "",
|
||||
"progress": 0,
|
||||
"OS-EXT-STS:power_state": 1,
|
||||
"config_drive": "",
|
||||
"metadata": {
|
||||
"env": "prod"
|
||||
}
|
||||
},
|
||||
"id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
|
||||
"security_groups": [
|
||||
{
|
||||
"name": "default"
|
||||
{
|
||||
"status": "ACTIVE",
|
||||
"updated": "2014-09-25T13:04:49Z",
|
||||
"hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362",
|
||||
"OS-EXT-SRV-ATTR:host": "devstack",
|
||||
"addresses": {
|
||||
"private": [
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.0.0.33",
|
||||
"OS-EXT-IPS:type": "fixed"
|
||||
},
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.0.0.34",
|
||||
"OS-EXT-IPS:type": "fixed"
|
||||
},
|
||||
{
|
||||
"version": 4,
|
||||
"addr": "10.10.10.24",
|
||||
"OS-EXT-IPS:type": "floating"
|
||||
}
|
||||
]
|
||||
},
|
||||
"links": [
|
||||
{
|
||||
"href": "http://104.130.131.164:8774/v2/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
|
||||
"rel": "self"
|
||||
},
|
||||
{
|
||||
"href": "http://104.130.131.164:8774/b78fef2305934dbbbeb9a10b4c326f7a/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba",
|
||||
"rel": "bookmark"
|
||||
}
|
||||
],
|
||||
"key_name": null,
|
||||
"image": "",
|
||||
"OS-EXT-STS:task_state": null,
|
||||
"OS-EXT-STS:vm_state": "active",
|
||||
"OS-EXT-SRV-ATTR:instance_name": "instance-0000002d",
|
||||
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
|
||||
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
|
||||
"flavor": {
|
||||
"vcpus": 2,
|
||||
"ram": 4096,
|
||||
"disk": 0,
|
||||
"ephemeral": 0,
|
||||
"swap": 0,
|
||||
"original_name": "m1.small",
|
||||
"extra_specs": {
|
||||
"aggregate_instance_extra_specs:general": "true",
|
||||
"hw:mem_page_size": "large",
|
||||
"hw:vif_multiqueue_enabled": "true"
|
||||
}
|
||||
},
|
||||
"id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f",
|
||||
"security_groups": [
|
||||
{
|
||||
"name": "default"
|
||||
}
|
||||
],
|
||||
"OS-SRV-USG:terminated_at": null,
|
||||
"OS-EXT-AZ:availability_zone": "nova",
|
||||
"user_id": "9349aff8be7545ac9d2f1d00999a23cd",
|
||||
"name": "merp-project2",
|
||||
"created": "2014-09-25T13:04:41Z",
|
||||
"tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a",
|
||||
"OS-DCF:diskConfig": "MANUAL",
|
||||
"os-extended-volumes:volumes_attached": [],
|
||||
"accessIPv4": "",
|
||||
"accessIPv6": "",
|
||||
"progress": 0,
|
||||
"OS-EXT-STS:power_state": 1,
|
||||
"config_drive": "",
|
||||
"metadata": {
|
||||
"env": "prod"
|
||||
}
|
||||
],
|
||||
"OS-SRV-USG:terminated_at": null,
|
||||
"OS-EXT-AZ:availability_zone": "nova",
|
||||
"user_id": "9349aff8be7545ac9d2f1d00999a23cd",
|
||||
"name": "merp",
|
||||
"created": "2014-09-25T13:04:41Z",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"OS-DCF:diskConfig": "MANUAL",
|
||||
"os-extended-volumes:volumes_attached": [],
|
||||
"accessIPv4": "",
|
||||
"accessIPv6": "",
|
||||
"progress": 0,
|
||||
"OS-EXT-STS:power_state": 1,
|
||||
"config_drive": "",
|
||||
"metadata": {
|
||||
"env": "prod"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
@ -580,35 +656,139 @@ func (m *SDMock) HandleServerListSuccessfully() {
|
|||
|
||||
const listOutput = `
|
||||
{
|
||||
"floating_ips": [
|
||||
"floatingips": [
|
||||
{
|
||||
"id": "03a77860-ae03-46c4-b502-caea11467a79",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"floating_ip_address": "10.10.10.1",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "d5597901-48c8-4a69-a041-cfc5be158a04",
|
||||
"fixed_ip_address": null,
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T16:30:27Z",
|
||||
"updated_at": "2023-08-30T16:30:28Z"
|
||||
},
|
||||
{
|
||||
"id": "03e28c79-5a4c-491e-a4fe-3ff6bba830c6",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"floating_ip_address": "10.10.10.2",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "4a45b012-0478-484d-8cf3-c8abdb194d08",
|
||||
"fixed_ip_address": "10.0.0.32",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-09-06T15:45:36Z",
|
||||
"updated_at": "2023-09-06T15:45:36Z"
|
||||
},
|
||||
{
|
||||
"id": "087fcdd2-1d13-4f72-9c0e-c759e796d558",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"floating_ip_address": "10.10.10.4",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "a0e244e8-7910-4427-b8d1-20470cad4f8a",
|
||||
"fixed_ip_address": "10.0.0.34",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2024-01-24T13:30:50Z",
|
||||
"updated_at": "2024-01-24T13:30:51Z"
|
||||
},
|
||||
{
|
||||
"id": "b23df91a-a74a-4f75-b252-750aff4a5a0c",
|
||||
"tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a",
|
||||
"floating_ip_address": "10.10.10.24",
|
||||
"floating_network_id": "b19ff5bc-a49a-46cc-8d14-ca5f1e94791f",
|
||||
"router_id": "65a5e5af-17f0-4124-9a81-c08b44f5b8a7",
|
||||
"port_id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9",
|
||||
"fixed_ip_address": "10.0.0.34",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2024-01-24T13:30:50Z",
|
||||
"updated_at": "2024-01-24T13:30:51Z"
|
||||
},
|
||||
{
|
||||
"fixed_ip": null,
|
||||
"id": "1",
|
||||
"instance_id": null,
|
||||
"ip": "10.10.10.1",
|
||||
"pool": "nova"
|
||||
},
|
||||
{
|
||||
"fixed_ip": "10.0.0.32",
|
||||
"id": "2",
|
||||
"instance_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
|
||||
"ip": "10.10.10.2",
|
||||
"pool": "nova"
|
||||
},
|
||||
{
|
||||
"fixed_ip": "10.0.0.34",
|
||||
"id": "3",
|
||||
"instance_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
|
||||
"ip": "10.10.10.4",
|
||||
"pool": "nova"
|
||||
}
|
||||
]
|
||||
"id": "fea7332d-9027-4cf9-bf62-c3c4c6ebaf84",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"floating_ip_address": "192.168.1.2",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e",
|
||||
"fixed_ip_address": "10.0.0.32",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T15:11:37Z",
|
||||
"updated_at": "2023-08-30T15:11:38Z",
|
||||
"revision_number": 1,
|
||||
"project_id": "fcad67a6189847c4aecfa3c81a05783b"
|
||||
},
|
||||
{
|
||||
"id": "febb9554-cf83-4f9b-94d9-1b3c34be357f",
|
||||
"tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85",
|
||||
"floating_ip_address": "192.168.3.4",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9",
|
||||
"fixed_ip_address": "10.0.2.78",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T15:11:37Z",
|
||||
"updated_at": "2023-08-30T15:11:38Z",
|
||||
"revision_number": 1,
|
||||
"project_id": "ac57f03dba1a4fdebff3e67201bc7a85"
|
||||
},
|
||||
{
|
||||
"id": "febb9554-cf83-4f9b-94d9-1b3c34be357f",
|
||||
"tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa",
|
||||
"floating_ip_address": "192.168.4.5",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6",
|
||||
"fixed_ip_address": "10.0.3.99",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T15:11:37Z",
|
||||
"updated_at": "2023-08-30T15:11:38Z",
|
||||
"revision_number": 1,
|
||||
"project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
// HandleFloatingIPListSuccessfully mocks floating ips call.
|
||||
func (m *SDMock) HandleFloatingIPListSuccessfully() {
|
||||
m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
|
||||
m.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) {
|
||||
testMethod(m.t, r, http.MethodGet)
|
||||
testHeader(m.t, r, "X-Auth-Token", tokenID)
|
||||
|
||||
|
@ -617,6 +797,143 @@ func (m *SDMock) HandleFloatingIPListSuccessfully() {
|
|||
})
|
||||
}
|
||||
|
||||
const portsListBody = `
|
||||
{
|
||||
"ports": [
|
||||
{
|
||||
"id": "d5597901-48c8-4a69-a041-cfc5be158a04",
|
||||
"name": "",
|
||||
"network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"mac_address": "",
|
||||
"admin_state_up": true,
|
||||
"status": "DOWN",
|
||||
"device_id": "",
|
||||
"device_owner": "",
|
||||
"fixed_ips": [],
|
||||
"allowed_address_pairs": [],
|
||||
"extra_dhcp_opts": [],
|
||||
"security_groups": [],
|
||||
"description": "",
|
||||
"binding:vnic_type": "normal",
|
||||
"port_security_enabled": true,
|
||||
"dns_name": "",
|
||||
"dns_assignment": [],
|
||||
"dns_domain": "",
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T16:30:27Z",
|
||||
"updated_at": "2023-08-30T16:30:28Z",
|
||||
"revision_number": 0,
|
||||
"project_id": "fcad67a6189847c4aecfa3c81a05783b"
|
||||
},
|
||||
{
|
||||
"id": "4a45b012-0478-484d-8cf3-c8abdb194d08",
|
||||
"name": "ovn-lb-vip-0980c8de-58c3-481d-89e3-ed81f44286c0",
|
||||
"network_id": "03200a39-b399-44f3-a778-6dbb93343a31",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"mac_address": "fa:16:3e:23:12:a3",
|
||||
"admin_state_up": true,
|
||||
"status": "ACTIVE",
|
||||
"device_id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5",
|
||||
"device_owner": "",
|
||||
"fixed_ips": [
|
||||
{
|
||||
"subnet_id": "",
|
||||
"ip_address": "10.10.10.2"
|
||||
}
|
||||
],
|
||||
"allowed_address_pairs": [],
|
||||
"extra_dhcp_opts": [],
|
||||
"security_groups": [],
|
||||
"description": "",
|
||||
"binding:vnic_type": "normal",
|
||||
"port_security_enabled": true,
|
||||
"dns_name": "",
|
||||
"dns_assignment": [],
|
||||
"dns_domain": "",
|
||||
"tags": [],
|
||||
"created_at": "2023-09-06T15:45:36Z",
|
||||
"updated_at": "2023-09-06T15:45:36Z",
|
||||
"revision_number": 0,
|
||||
"project_id": "fcad67a6189847c4aecfa3c81a05783b"
|
||||
},
|
||||
{
|
||||
"id": "a0e244e8-7910-4427-b8d1-20470cad4f8a",
|
||||
"name": "ovn-lb-vip-26c0ccb1-3036-4345-99e8-d8f34a8ba6b2",
|
||||
"network_id": "03200a39-b399-44f3-a778-6dbb93343a31",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"mac_address": "fa:16:3e:5f:43:10",
|
||||
"admin_state_up": true,
|
||||
"status": "ACTIVE",
|
||||
"device_id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
|
||||
"device_owner": "",
|
||||
"fixed_ips": [
|
||||
{
|
||||
"subnet_id": "",
|
||||
"ip_address": "10.10.10.4"
|
||||
}
|
||||
],
|
||||
"allowed_address_pairs": [],
|
||||
"extra_dhcp_opts": [],
|
||||
"security_groups": [],
|
||||
"description": "",
|
||||
"binding:vnic_type": "normal",
|
||||
"port_security_enabled": true,
|
||||
"dns_name": "",
|
||||
"dns_assignment": [],
|
||||
"dns_domain": "",
|
||||
"tags": [],
|
||||
"created_at": "2024-01-24T13:30:50Z",
|
||||
"updated_at": "2024-01-24T13:30:51Z",
|
||||
"revision_number": 0,
|
||||
"project_id": "fcad67a6189847c4aecfa3c81a05783b"
|
||||
},
|
||||
{
|
||||
"id": "b926ab68-ec54-46d8-8c50-1c07aafd5ae9",
|
||||
"name": "dummy-port",
|
||||
"network_id": "03200a39-b399-44f3-a778-6dbb93343a31",
|
||||
"tenant_id": "b78fef2305934dbbbeb9a10b4c326f7a",
|
||||
"mac_address": "fa:16:3e:5f:12:10",
|
||||
"admin_state_up": true,
|
||||
"status": "ACTIVE",
|
||||
"device_id": "87caf8ed-d92a-41f6-9dcd-d1399e39899f",
|
||||
"device_owner": "",
|
||||
"fixed_ips": [
|
||||
{
|
||||
"subnet_id": "",
|
||||
"ip_address": "10.10.10.24"
|
||||
}
|
||||
],
|
||||
"allowed_address_pairs": [],
|
||||
"extra_dhcp_opts": [],
|
||||
"security_groups": [],
|
||||
"description": "",
|
||||
"binding:vnic_type": "normal",
|
||||
"port_security_enabled": true,
|
||||
"dns_name": "",
|
||||
"dns_assignment": [],
|
||||
"dns_domain": "",
|
||||
"tags": [],
|
||||
"created_at": "2024-01-24T13:30:50Z",
|
||||
"updated_at": "2024-01-24T13:30:51Z",
|
||||
"revision_number": 0,
|
||||
"project_id": "b78fef2305934dbbbeb9a10b4c326f7a"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
// HandlePortsListSuccessfully mocks the ports list API.
|
||||
func (m *SDMock) HandlePortsListSuccessfully() {
|
||||
m.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) {
|
||||
testMethod(m.t, r, http.MethodGet)
|
||||
testHeader(m.t, r, "X-Auth-Token", tokenID)
|
||||
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
fmt.Fprint(w, portsListBody)
|
||||
})
|
||||
}
|
||||
|
||||
const lbListBody = `
|
||||
{
|
||||
"loadbalancers": [
|
||||
|
@ -1084,105 +1401,3 @@ func (m *SDMock) HandleListenersListSuccessfully() {
|
|||
fmt.Fprint(w, listenerListBody)
|
||||
})
|
||||
}
|
||||
|
||||
const floatingIPListBody = `
|
||||
{
|
||||
"floatingips": [
|
||||
{
|
||||
"id": "fea7332d-9027-4cf9-bf62-c3c4c6ebaf84",
|
||||
"tenant_id": "fcad67a6189847c4aecfa3c81a05783b",
|
||||
"floating_ip_address": "192.168.1.2",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "b47c39f5-238d-4b17-ae87-9b5d19af8a2e",
|
||||
"fixed_ip_address": "10.0.0.32",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"port_details": {
|
||||
"name": "dummy",
|
||||
"network_id": "03200a39-b399-44f3-a778-6dbb93343a31",
|
||||
"mac_address": "fa:16:3e:b3:a3:c6",
|
||||
"admin_state_up": true,
|
||||
"status": "ACTIVE",
|
||||
"device_id": "",
|
||||
"device_owner": "compute:az1"
|
||||
},
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T15:11:37Z",
|
||||
"updated_at": "2023-08-30T15:11:38Z",
|
||||
"revision_number": 1,
|
||||
"project_id": "fcad67a6189847c4aecfa3c81a05783b"
|
||||
},
|
||||
{
|
||||
"id": "febb9554-cf83-4f9b-94d9-1b3c34be357f",
|
||||
"tenant_id": "ac57f03dba1a4fdebff3e67201bc7a85",
|
||||
"floating_ip_address": "192.168.3.4",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "c83b6e12-4e5d-4673-a4b3-5bc72a7f3ef9",
|
||||
"fixed_ip_address": "10.0.2.78",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"port_details": {
|
||||
"name": "dummy",
|
||||
"network_id": "03200a39-b399-44f3-a778-6dbb93343a31",
|
||||
"mac_address": "fa:16:3e:b3:a3:c6",
|
||||
"admin_state_up": true,
|
||||
"status": "ACTIVE",
|
||||
"device_id": "",
|
||||
"device_owner": "compute:az3"
|
||||
},
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T15:11:37Z",
|
||||
"updated_at": "2023-08-30T15:11:38Z",
|
||||
"revision_number": 1,
|
||||
"project_id": "ac57f03dba1a4fdebff3e67201bc7a85"
|
||||
},
|
||||
{
|
||||
"id": "febb9554-cf83-4f9b-94d9-1b3c34be357f",
|
||||
"tenant_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa",
|
||||
"floating_ip_address": "192.168.4.5",
|
||||
"floating_network_id": "d02c4f18-d606-4864-b12a-1c9b39a46be2",
|
||||
"router_id": "f03af93b-4e8f-4f55-adcf-a0317782ede2",
|
||||
"port_id": "f9e8b6e12-7e4d-4963-a5b3-6cd82a7f3ff6",
|
||||
"fixed_ip_address": "10.0.3.99",
|
||||
"status": "ACTIVE",
|
||||
"description": "",
|
||||
"port_details": {
|
||||
"name": "dummy",
|
||||
"network_id": "03200a39-b399-44f3-a778-6dbb93343a31",
|
||||
"mac_address": "fa:16:3e:b3:a3:c6",
|
||||
"admin_state_up": true,
|
||||
"status": "ACTIVE",
|
||||
"device_id": "",
|
||||
"device_owner": "compute:az3"
|
||||
},
|
||||
"dns_domain": "",
|
||||
"dns_name": "",
|
||||
"port_forwardings": [],
|
||||
"tags": [],
|
||||
"created_at": "2023-08-30T15:11:37Z",
|
||||
"updated_at": "2023-08-30T15:11:38Z",
|
||||
"revision_number": 1,
|
||||
"project_id": "fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
// HandleFloatingIPsListSuccessfully mocks the floating IPs endpoint.
|
||||
func (m *SDMock) HandleFloatingIPsListSuccessfully() {
|
||||
m.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) {
|
||||
testMethod(m.t, r, http.MethodGet)
|
||||
testHeader(m.t, r, "X-Auth-Token", tokenID)
|
||||
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
fmt.Fprint(w, floatingIPListBody)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ The Prometheus monitoring server
|
|||
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
|
||||
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
|
||||
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
|
||||
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
|
||||
| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
|
||||
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. | |
|
||||
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
|
||||
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |
|
||||
|
|
|
@ -59,9 +59,10 @@ Check the resources for validity.
|
|||
|
||||
#### Flags
|
||||
|
||||
| Flag | Description |
|
||||
| --- | --- |
|
||||
| <code class="text-nowrap">--extended</code> | Print extended information related to the cardinality of the metrics. |
|
||||
| Flag | Description | Default |
|
||||
| --- | --- | --- |
|
||||
| <code class="text-nowrap">--query.lookback-delta</code> | The server's maximum query lookback duration. | `5m` |
|
||||
| <code class="text-nowrap">--extended</code> | Print extended information related to the cardinality of the metrics. | |
|
||||
|
||||
|
||||
|
||||
|
@ -102,8 +103,9 @@ Check if the config files are valid or not.
|
|||
| Flag | Description | Default |
|
||||
| --- | --- | --- |
|
||||
| <code class="text-nowrap">--syntax-only</code> | Only check the config file syntax, ignoring file and content validation referenced in the config | |
|
||||
| <code class="text-nowrap">--lint</code> | Linting checks to apply to the rules specified in the config. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` |
|
||||
| <code class="text-nowrap">--lint</code> | Linting checks to apply to the rules/scrape configs specified in the config. Available options are: all, duplicate-rules, none, too-long-scrape-interval. Use --lint=none to disable linting | `duplicate-rules` |
|
||||
| <code class="text-nowrap">--lint-fatal</code> | Make lint errors exit with exit code 3. | `false` |
|
||||
| <code class="text-nowrap">--ignore-unknown-fields</code> | Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` |
|
||||
| <code class="text-nowrap">--agent</code> | Check config file for Prometheus in Agent mode. | |
|
||||
|
||||
|
||||
|
@ -177,6 +179,7 @@ Check if the rule files are valid or not.
|
|||
| --- | --- | --- |
|
||||
| <code class="text-nowrap">--lint</code> | Linting checks to apply. Available options are: all, duplicate-rules, none. Use --lint=none to disable linting | `duplicate-rules` |
|
||||
| <code class="text-nowrap">--lint-fatal</code> | Make lint errors exit with exit code 3. | `false` |
|
||||
| <code class="text-nowrap">--ignore-unknown-fields</code> | Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` |
|
||||
|
||||
|
||||
|
||||
|
@ -464,6 +467,7 @@ Unit tests for rules.
|
|||
| <code class="text-nowrap">--run</code> <code class="text-nowrap">...<code class="text-nowrap"> | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | |
|
||||
| <code class="text-nowrap">--debug</code> | Enable unit test debugging. | `false` |
|
||||
| <code class="text-nowrap">--diff</code> | [Experimental] Print colored differential output between expected & received output. | `false` |
|
||||
| <code class="text-nowrap">--ignore-unknown-fields</code> | Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default. | `false` |
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -59,6 +59,7 @@ global:
|
|||
[ scrape_interval: <duration> | default = 1m ]
|
||||
|
||||
# How long until a scrape request times out.
|
||||
# It cannot be greater than the scrape interval.
|
||||
[ scrape_timeout: <duration> | default = 10s ]
|
||||
|
||||
# The protocols to negotiate during a scrape with the client.
|
||||
|
@ -221,6 +222,7 @@ job_name: <job_name>
|
|||
[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]
|
||||
|
||||
# Per-scrape timeout when scraping this job.
|
||||
# It cannot be greater than the scrape interval.
|
||||
[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]
|
||||
|
||||
# The protocols to negotiate during a scrape with the client.
|
||||
|
|
|
@ -68,7 +68,7 @@ versions.
|
|||
|
||||
| Name | Arguments | Returns | Notes |
|
||||
| ------------- | ------------- | ------- | ----------- |
|
||||
| title | string | string | [strings.Title](https://golang.org/pkg/strings/#Title), capitalises first character of each word.|
|
||||
| title | string | string | [cases.Title](https://pkg.go.dev/golang.org/x/text/cases#Title), capitalises first character of each word.|
|
||||
| toUpper | string | string | [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper), converts all characters to upper case.|
|
||||
| toLower | string | string | [strings.ToLower](https://golang.org/pkg/strings/#ToLower), converts all characters to lower case.|
|
||||
| stripPort | string | string | [net.SplitHostPort](https://pkg.go.dev/net#SplitHostPort), splits string into host and port, then returns only host.|
|
||||
|
|
|
@ -151,3 +151,25 @@ Configuration reloads are triggered by detecting changes in the checksum of the
|
|||
main configuration file or any referenced files, such as rule and scrape
|
||||
configurations. To ensure consistency and avoid issues during reloads, it's
|
||||
recommended to update these files atomically.
|
||||
|
||||
## OTLP Delta Conversion
|
||||
|
||||
`--enable-feature=otlp-deltatocumulative`
|
||||
|
||||
When enabled, Prometheus will convert OTLP metrics from delta temporality to their
|
||||
cumulative equivalent, instead of dropping them.
|
||||
|
||||
This uses
|
||||
[deltatocumulative][d2c]
|
||||
from the OTel collector, using its default settings.
|
||||
|
||||
Delta conversion keeps in-memory state to aggregate delta changes per-series over time.
|
||||
When Prometheus restarts, this state is lost, starting the aggregation from zero
|
||||
again. This results in a counter reset in the cumulative series.
|
||||
|
||||
This state is periodically ([`max_stale`][d2c]) cleared of inactive series.
|
||||
|
||||
Enabling this _can_ have negative impact on performance, because the in-memory
|
||||
state is mutex guarded. Cumulative-only OTLP requests are not affected.
|
||||
|
||||
[d2c]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor
|
||||
|
|
|
@ -8,7 +8,7 @@ sort_rank: 7
|
|||
Prometheus provides a generic [HTTP Service Discovery](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#http_sd_config),
|
||||
that enables it to discover targets over an HTTP endpoint.
|
||||
|
||||
The HTTP Service Discovery is complimentary to the supported service
|
||||
The HTTP Service Discovery is complementary to the supported service
|
||||
discovery mechanisms, and is an alternative to [File-based Service Discovery](https://prometheus.io/docs/guides/file-sd/#use-file-based-service-discovery-to-discover-scrape-targets).
|
||||
|
||||
## Comparison between File-Based SD and HTTP SD
|
||||
|
|
|
@ -60,36 +60,63 @@ This document offers guidance on migrating from Prometheus 2.x to Prometheus 3.0
|
|||
|
||||
## PromQL
|
||||
|
||||
- The `.` pattern in regular expressions in PromQL matches newline characters.
|
||||
With this change a regular expression like `.*` matches strings that include
|
||||
`\n`. This applies to matchers in queries and relabel configs.
|
||||
- For example, the following regular expressions now match the accompanying
|
||||
strings, whereas in Prometheus v2 these combinations didn't match.
|
||||
- `.*` additionally matches `foo\n` and `Foo\nBar`
|
||||
- `foo.?bar` additionally matches `foo\nbar`
|
||||
- `foo.+bar` additionally matches `foo\nbar`
|
||||
- If you want Prometheus v3 to behave like v2, you will have to change your
|
||||
regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
|
||||
`foo[^\n]*`.
|
||||
- Lookback and range selectors are left open and right closed (previously left
|
||||
closed and right closed). This change affects queries when the evaluation time
|
||||
perfectly aligns with the sample timestamps. For example assume querying a
|
||||
timeseries with evenly spaced samples exactly 1 minute apart. Before Prometheus
|
||||
v3, a range query with `5m` would usually return 5 samples. But if the query
|
||||
evaluation aligns perfectly with a scrape, it would return 6 samples. In
|
||||
Prometheus v3 queries like this will always return 5 samples.
|
||||
This change has likely few effects for everyday use, except for some subquery
|
||||
use cases.
|
||||
Query front-ends that align queries usually align subqueries to multiples of
|
||||
the step size. These subqueries will likely be affected.
|
||||
Tests are more likely to be affected. To fix those either adjust the expected
|
||||
number of samples or extend the range by less than one sample interval.
|
||||
- The `holt_winters` function has been renamed to `double_exponential_smoothing`
|
||||
and is now guarded by the `promql-experimental-functions` feature flag.
|
||||
If you want to keep using `holt_winters`, you have to do both of these things:
|
||||
- Rename `holt_winters` to `double_exponential_smoothing` in your queries.
|
||||
- Pass `--enable-feature=promql-experimental-functions` in your Prometheus
|
||||
CLI invocation.
|
||||
### Regular expressions match newlines
|
||||
|
||||
The `.` pattern in regular expressions in PromQL matches newline characters.
|
||||
With this change a regular expression like `.*` matches strings that include
|
||||
`\n`. This applies to matchers in queries and relabel configs.
|
||||
|
||||
For example, the following regular expressions now match the accompanying
|
||||
strings, whereas in Prometheus v2 these combinations didn't match.
|
||||
- `.*` additionally matches `foo\n` and `Foo\nBar`
|
||||
- `foo.?bar` additionally matches `foo\nbar`
|
||||
- `foo.+bar` additionally matches `foo\nbar`
|
||||
|
||||
If you want Prometheus v3 to behave like v2, you will have to change your
|
||||
regular expressions by replacing all `.` patterns with `[^\n]`, e.g.
|
||||
`foo[^\n]*`.
|
||||
|
||||
### Range selectors and lookback exclude samples coinciding with the left boundary
|
||||
|
||||
Lookback and range selectors are now left-open and right-closed (previously
|
||||
left-closed and right-closed), which makes their behavior more consistent. This
|
||||
change affects queries where the left boundary of a range or the lookback delta
|
||||
coincides with the timestamp of one or more samples.
|
||||
|
||||
For example, assume we are querying a timeseries with evenly spaced samples
|
||||
exactly 1 minute apart. Before Prometheus v3, a range query with `5m` would
|
||||
usually return 5 samples. But if the query evaluation aligns perfectly with a
|
||||
scrape, it would return 6 samples. In Prometheus v3 queries like this will
|
||||
always return 5 samples given even spacing.
|
||||
|
||||
This change will typically affect subqueries because their evaluation timing is
|
||||
naturally perfectly evenly spaced and aligned with timestamps that are multiples
|
||||
of the subquery resolution. Furthermore, query frontends often align subqueries
|
||||
to multiples of the step size. In combination, this easily creates a situation
|
||||
of perfect mutual alignment, often unintended and unknown by the user, so that
|
||||
the new behavior might come as a surprise. Before Prometheus v3, a subquery of
|
||||
`foo[1m:1m]` on such a system might have always returned two points, allowing
|
||||
for rate calculations. In Prometheus v3, however, such a subquery will only
|
||||
return one point, which is insufficient for a rate or increase calculation,
|
||||
resulting in No Data returned.
|
||||
|
||||
Such queries will need to be rewritten to extend the window to properly cover
|
||||
more than one point. In this example, `foo[2m:1m]` would always return two
|
||||
points no matter the query alignment. The exact form of the rewritten query may
|
||||
depend on the intended results and there is no universal drop-in replacement for
|
||||
queries whose behavior has changed.
|
||||
|
||||
Tests are similarly more likely to be affected. To fix those either adjust the
|
||||
expected number of samples or extend the range.
|
||||
|
||||
### holt_winters function renamed
|
||||
|
||||
The `holt_winters` function has been renamed to `double_exponential_smoothing`
|
||||
and is now guarded by the `promql-experimental-functions` feature flag.
|
||||
If you want to keep using `holt_winters`, you have to do both of these things:
|
||||
- Rename `holt_winters` to `double_exponential_smoothing` in your queries.
|
||||
- Pass `--enable-feature=promql-experimental-functions` in your Prometheus
|
||||
CLI invocation.
|
||||
|
||||
## Scrape protocols
|
||||
Prometheus v3 is more strict concerning the Content-Type header received when
|
||||
|
|
|
@ -86,6 +86,7 @@ URL query parameters:
|
|||
- `time=<rfc3339 | unix_timestamp>`: Evaluation timestamp. Optional.
|
||||
- `timeout=<duration>`: Evaluation timeout. Optional. Defaults to and
|
||||
is capped by the value of the `-query.timeout` flag.
|
||||
- `limit=<number>`: Maximum number of returned series. Doesn’t affect scalars or strings but truncates the number of series for matrices and vectors. Optional. 0 means disabled.
|
||||
|
||||
The current server time is used if the `time` parameter is omitted.
|
||||
|
||||
|
@ -154,6 +155,7 @@ URL query parameters:
|
|||
- `step=<duration | float>`: Query resolution step width in `duration` format or float number of seconds.
|
||||
- `timeout=<duration>`: Evaluation timeout. Optional. Defaults to and
|
||||
is capped by the value of the `-query.timeout` flag.
|
||||
- `limit=<number>`: Maximum number of returned series. Optional. 0 means disabled.
|
||||
|
||||
You can URL-encode these parameters directly in the request body by using the `POST` method and
|
||||
`Content-Type: application/x-www-form-urlencoded` header. This is useful when specifying a large
|
||||
|
@ -1158,6 +1160,8 @@ $ curl http://localhost:9090/api/v1/status/runtimeinfo
|
|||
"data": {
|
||||
"startTime": "2019-11-02T17:23:59.301361365+01:00",
|
||||
"CWD": "/",
|
||||
"hostname" : "DESKTOP-717H17Q",
|
||||
"serverTime": "2025-01-05T18:27:33Z",
|
||||
"reloadConfigSuccess": true,
|
||||
"lastConfigTime": "2019-11-02T17:23:59+01:00",
|
||||
"timeSeriesCount": 873,
|
||||
|
@ -1420,6 +1424,15 @@ endpoint is `/api/v1/otlp/v1/metrics`.
|
|||
|
||||
*New in v2.47*
|
||||
|
||||
### OTLP Delta
|
||||
|
||||
Prometheus can convert incoming metrics from delta temporality to their cumulative equivalent.
|
||||
This is done using [deltatocumulative](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/deltatocumulativeprocessor) from the OpenTelemetry Collector.
|
||||
|
||||
To enable, pass `--enable-feature=otlp-deltatocumulative`.
|
||||
|
||||
*New in v3.2*
|
||||
|
||||
## Notifications
|
||||
|
||||
The following endpoints provide information about active status notifications concerning the Prometheus server itself.
|
||||
|
|
|
@ -55,8 +55,8 @@ require (
|
|||
go.opentelemetry.io/otel/trace v1.27.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.30.0 // indirect
|
||||
golang.org/x/net v0.32.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/oauth2 v0.24.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
|
|
|
@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
|
|||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
|
@ -344,8 +344,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
|
|||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
|
||||
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
|
||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -4,20 +4,11 @@
|
|||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/grafonnet-lib.git",
|
||||
"subdir": "grafonnet"
|
||||
"remote": "https://github.com/grafana/grafonnet.git",
|
||||
"subdir": "gen/grafonnet-latest"
|
||||
}
|
||||
},
|
||||
"version": "master"
|
||||
},
|
||||
{
|
||||
"source": {
|
||||
"git": {
|
||||
"remote": "https://github.com/grafana/jsonnet-libs.git",
|
||||
"subdir": "grafana-builder"
|
||||
}
|
||||
},
|
||||
"version": "master"
|
||||
"version": "main"
|
||||
}
|
||||
],
|
||||
"legacyImports": false
|
||||
|
|
37
go.mod
37
go.mod
|
@ -20,7 +20,7 @@ require (
|
|||
github.com/digitalocean/godo v1.132.0
|
||||
github.com/docker/docker v27.4.1+incompatible
|
||||
github.com/edsrzf/mmap-go v1.2.0
|
||||
github.com/envoyproxy/go-control-plane v0.13.1
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.2
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
|
||||
github.com/fsnotify/fsnotify v1.8.0
|
||||
|
@ -34,20 +34,21 @@ require (
|
|||
github.com/gophercloud/gophercloud v1.14.1
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0
|
||||
github.com/hashicorp/consul/api v1.30.0
|
||||
github.com/hashicorp/consul/api v1.31.0
|
||||
github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec
|
||||
github.com/hetznercloud/hcloud-go/v2 v2.17.1
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.0
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/klauspost/compress v1.17.11
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
|
||||
github.com/linode/linodego v1.43.0
|
||||
github.com/linode/linodego v1.44.0
|
||||
github.com/miekg/dns v1.1.62
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
|
||||
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/oklog/ulid v1.3.1
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0
|
||||
github.com/ovh/go-ovh v1.6.0
|
||||
github.com/prometheus/alertmanager v0.27.0
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
|
@ -55,12 +56,15 @@ require (
|
|||
github.com/prometheus/common v0.61.0
|
||||
github.com/prometheus/common/assets v0.2.0
|
||||
github.com/prometheus/exporter-toolkit v0.13.2
|
||||
github.com/prometheus/sigv4 v0.1.0
|
||||
github.com/prometheus/sigv4 v0.1.1
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
|
||||
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/vultr/govultr/v2 v2.17.2
|
||||
go.opentelemetry.io/collector/component v0.116.0
|
||||
go.opentelemetry.io/collector/consumer v1.22.0
|
||||
go.opentelemetry.io/collector/pdata v1.22.0
|
||||
go.opentelemetry.io/collector/processor v0.116.0
|
||||
go.opentelemetry.io/collector/semconv v0.116.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
|
||||
|
@ -68,21 +72,22 @@ require (
|
|||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0
|
||||
go.opentelemetry.io/otel/metric v1.33.0
|
||||
go.opentelemetry.io/otel/sdk v1.33.0
|
||||
go.opentelemetry.io/otel/trace v1.33.0
|
||||
go.uber.org/atomic v1.11.0
|
||||
go.uber.org/automaxprocs v1.6.0
|
||||
go.uber.org/goleak v1.3.0
|
||||
go.uber.org/multierr v1.11.0
|
||||
golang.org/x/oauth2 v0.24.0
|
||||
golang.org/x/oauth2 v0.25.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/tools v0.28.0
|
||||
google.golang.org/api v0.213.0
|
||||
google.golang.org/api v0.216.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484
|
||||
google.golang.org/grpc v1.69.0
|
||||
google.golang.org/protobuf v1.36.0
|
||||
google.golang.org/grpc v1.69.4
|
||||
google.golang.org/protobuf v1.36.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.31.3
|
||||
|
@ -127,7 +132,7 @@ require (
|
|||
github.com/go-openapi/spec v0.20.14 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-openapi/validate v0.23.0 // indirect
|
||||
github.com/go-resty/resty/v2 v2.15.3 // indirect
|
||||
github.com/go-resty/resty/v2 v2.16.2 // indirect
|
||||
github.com/godbus/dbus/v5 v5.0.4 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/glog v1.2.2 // indirect
|
||||
|
@ -137,7 +142,7 @@ require (
|
|||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.8 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/hashicorp/cronexpr v1.1.2 // indirect
|
||||
|
@ -168,6 +173,8 @@ require (
|
|||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.2 // indirect
|
||||
|
@ -184,15 +191,17 @@ require (
|
|||
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/collector/config/configtelemetry v0.116.0 // indirect
|
||||
go.opentelemetry.io/collector/pipeline v0.116.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/net v0.32.0 // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/time v0.8.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
|
|
106
go.sum
106
go.sum
|
@ -110,8 +110,8 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr
|
|||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
|
||||
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.2 h1:zidqwmijfcbyKqVxjQDFx042PgX+p9U+/fu/f9VtSk8=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
|
||||
|
@ -160,11 +160,13 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
|
|||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
|
||||
github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
|
||||
github.com/go-resty/resty/v2 v2.15.3 h1:bqff+hcqAflpiF591hhJzNdkRsFhlB96CYfBwSFvql8=
|
||||
github.com/go-resty/resty/v2 v2.15.3/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU=
|
||||
github.com/go-resty/resty/v2 v2.16.2 h1:CpRqTjIzq/rweXUt9+GxzzQdlkqMdt8Lm/fuK/CAbAg=
|
||||
github.com/go-resty/resty/v2 v2.16.2/go.mod h1:0fHAoK7JoBy/Ch36N8VFeMsK7xQOHhvWaC3iOktwmIU=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
|
||||
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
|
||||
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
|
||||
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
|
||||
|
@ -213,8 +215,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
|
||||
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
|
||||
github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
|
@ -225,8 +227,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
|
|||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
|
||||
github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ=
|
||||
github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM=
|
||||
github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ=
|
||||
github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg=
|
||||
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
|
||||
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
|
||||
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
|
||||
|
@ -304,6 +306,12 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
|||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs=
|
||||
github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
|
||||
github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU=
|
||||
github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU=
|
||||
github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
|
||||
github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
|
||||
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -317,8 +325,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/linode/linodego v1.43.0 h1:sGeBB3caZt7vKBoPS5p4AVzmlG4JoqQOdigIibx3egk=
|
||||
github.com/linode/linodego v1.43.0/go.mod h1:n4TMFu1UVNala+icHqrTEFFaicYSF74cSAUG5zkTwfA=
|
||||
github.com/linode/linodego v1.44.0 h1:JZLLWzCAx3CmHSV9NmCoXisuqKtrmPhfY9MrgvaHMUY=
|
||||
github.com/linode/linodego v1.44.0/go.mod h1:umdoNOmtbqAdGQbmQnPFZ2YS4US+/mU/1bA7MjoKAvg=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
|
@ -348,6 +356,8 @@ github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJys
|
|||
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
|
||||
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
|
||||
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
|
||||
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
|
||||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
|
||||
|
@ -355,6 +365,8 @@ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp
|
|||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
|
||||
|
@ -383,6 +395,14 @@ github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA
|
|||
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
|
||||
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0 h1:Kxk5Ral+Dc6VB9UmTketVjs+rbMZP8JxQ4SXDx4RivQ=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.116.0/go.mod h1:ctT6oQmGmWGGGgUIKyx2fDwqz77N9+04gqKkDyAzKCg=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0 h1:ZBmLuipJv7BT9fho/2yAFsS8AtMsCOCe4ON8oqkX3n8=
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.116.0/go.mod h1:f0GdYWGxUunyRZ088gHnoX78pc/gZc3dQlRtidiGXzg=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
|
@ -437,8 +457,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
|
|||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/prometheus/sigv4 v0.1.0 h1:FgxH+m1qf9dGQ4w8Dd6VkthmpFQfGTzUeavMoQeG1LA=
|
||||
github.com/prometheus/sigv4 v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU=
|
||||
github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q=
|
||||
github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8=
|
||||
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
|
||||
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
|
@ -490,8 +510,36 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
|
|||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/collector/component v0.116.0 h1:SQE1YeVfYCN7bw1n4hknUwJE5U/1qJL552sDhAdSlaA=
|
||||
go.opentelemetry.io/collector/component v0.116.0/go.mod h1:MYgXFZWDTq0uPgF1mkLSFibtpNqksRVAOrmihckOQEs=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.116.0 h1:wpgY0H2K9IPBzaNAvavKziK86VZ7TuNFQbS9OC4Z6Cs=
|
||||
go.opentelemetry.io/collector/component/componentstatus v0.116.0/go.mod h1:ZRlVwHFMGNfcsAywEJqivOn5JzDZkpe3KZVSwMWu4tw=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.116.0 h1:UIcnx4Rrs/oDRYSAZNHRMUiYs2FBlwgV5Nc0oMYfR6A=
|
||||
go.opentelemetry.io/collector/component/componenttest v0.116.0/go.mod h1:W40HaKPHdBFMVI7zzHE7dhdWC+CgAnAC9SmWetFBATY=
|
||||
go.opentelemetry.io/collector/config/configtelemetry v0.116.0 h1:Vl49VCHQwBOeMswDpFwcl2HD8e9y94xlrfII3SR2VeQ=
|
||||
go.opentelemetry.io/collector/config/configtelemetry v0.116.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE=
|
||||
go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w=
|
||||
go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
|
||||
go.opentelemetry.io/collector/consumer v1.22.0 h1:QmfnNizyNZFt0uK3GG/EoT5h6PvZJ0dgVTc5hFEc1l0=
|
||||
go.opentelemetry.io/collector/consumer v1.22.0/go.mod h1:tiz2khNceFAPokxxfzAuFfIpShBasMT2AL2Sbc7+m0I=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.116.0 h1:pIVR7FtQMNAzfxBUSMEIC2dX5Lfo3O9ZBfx+sAwrrrM=
|
||||
go.opentelemetry.io/collector/consumer/consumertest v0.116.0/go.mod h1:cV3cNDiPnls5JdhnOJJFVlclrClg9kPs04cXgYP9Gmk=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.116.0 h1:ZrWvq7HumB0jRYmS2ztZ3hhXRNpUVBWPKMbPhsVGmZM=
|
||||
go.opentelemetry.io/collector/consumer/xconsumer v0.116.0/go.mod h1:C+VFMk8vLzPun6XK8aMts6h4RaDjmzXHCPaiOxzRQzQ=
|
||||
go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI=
|
||||
go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.116.0 h1:iE6lqkO7Hi6lTIIml1RI7yQ55CKqW12R2qHinwF5Zuk=
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.116.0/go.mod h1:xQiPpjzIiXRFb+1fPxUy/3ygEZgo0Bu/xmLKOWu8vMQ=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.116.0 h1:zmn1zpeX2BvzL6vt2dBF4OuAyFF2ml/OXcqflNgFiP0=
|
||||
go.opentelemetry.io/collector/pdata/testdata v0.116.0/go.mod h1:ytWzICFN4XTDP6o65B4+Ed52JGdqgk9B8CpLHCeCpMo=
|
||||
go.opentelemetry.io/collector/pipeline v0.116.0 h1:o8eKEuWEszmRpfShy7ElBoQ3Jo6kCi9ucm3yRgdNb9s=
|
||||
go.opentelemetry.io/collector/pipeline v0.116.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74=
|
||||
go.opentelemetry.io/collector/processor v0.116.0 h1:Kyu4tPzTdWNHtZjcxvI/bGNAgyv8L8Kem2r/Mk4IDAw=
|
||||
go.opentelemetry.io/collector/processor v0.116.0/go.mod h1:+/Ugy48RAxlZEXmN2cw51W8t5wdHS9No+GAoP+moskk=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.116.0 h1:+IqNEVEE0E2MsO2g7+Y/9dz35sDuvAXRXrLts9NdXrA=
|
||||
go.opentelemetry.io/collector/processor/processortest v0.116.0/go.mod h1:DLaQDBxzgeeaUO0ULMn/efos9PmHZkmYCHuxwCsiVHI=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.116.0 h1:iin/UwuWvSLB7ZNfINFUYbZ5lxIi1NjZ2brkyyFdiRA=
|
||||
go.opentelemetry.io/collector/processor/xprocessor v0.116.0/go.mod h1:cnA43/XpKDbaOmd8buqKp/LGJ2l/OoCqbR//u5DMfn8=
|
||||
go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y=
|
||||
go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI=
|
||||
|
@ -510,8 +558,8 @@ go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5W
|
|||
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
|
||||
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
||||
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
||||
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
|
||||
|
@ -524,6 +572,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
|||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
|
@ -558,12 +608,12 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
|
||||
golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
|
||||
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -614,8 +664,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -632,8 +682,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ=
|
||||
google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ=
|
||||
google.golang.org/api v0.216.0 h1:xnEHy+xWFrtYInWPy8OdGFsyIfWJjtVnO39g7pz2BFY=
|
||||
google.golang.org/api v0.216.0/go.mod h1:K9wzQMvWi47Z9IU7OgdOofvZuw75Ge3PPITImZR/UyI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
|
@ -641,17 +691,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98
|
|||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
|
||||
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
|
||||
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
|
|
@ -314,7 +314,7 @@ func testTemplateParsing(rl *RuleNode) (errs []error) {
|
|||
}
|
||||
|
||||
// Parse parses and validates a set of rules.
|
||||
func Parse(content []byte) (*RuleGroups, []error) {
|
||||
func Parse(content []byte, ignoreUnknownFields bool) (*RuleGroups, []error) {
|
||||
var (
|
||||
groups RuleGroups
|
||||
node ruleGroups
|
||||
|
@ -322,7 +322,9 @@ func Parse(content []byte) (*RuleGroups, []error) {
|
|||
)
|
||||
|
||||
decoder := yaml.NewDecoder(bytes.NewReader(content))
|
||||
decoder.KnownFields(true)
|
||||
if !ignoreUnknownFields {
|
||||
decoder.KnownFields(true)
|
||||
}
|
||||
err := decoder.Decode(&groups)
|
||||
// Ignore io.EOF which happens with empty input.
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
|
@ -341,12 +343,12 @@ func Parse(content []byte) (*RuleGroups, []error) {
|
|||
}
|
||||
|
||||
// ParseFile reads and parses rules from a file.
|
||||
func ParseFile(file string) (*RuleGroups, []error) {
|
||||
func ParseFile(file string, ignoreUnknownFields bool) (*RuleGroups, []error) {
|
||||
b, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, []error{fmt.Errorf("%s: %w", file, err)}
|
||||
}
|
||||
rgs, errs := Parse(b)
|
||||
rgs, errs := Parse(b, ignoreUnknownFields)
|
||||
for i := range errs {
|
||||
errs[i] = fmt.Errorf("%s: %w", file, errs[i])
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ import (
|
|||
)
|
||||
|
||||
func TestParseFileSuccess(t *testing.T) {
|
||||
_, errs := ParseFile("testdata/test.yaml")
|
||||
_, errs := ParseFile("testdata/test.yaml", false)
|
||||
require.Empty(t, errs, "unexpected errors parsing file")
|
||||
}
|
||||
|
||||
|
@ -84,7 +84,7 @@ func TestParseFileFailure(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, c := range table {
|
||||
_, errs := ParseFile(filepath.Join("testdata", c.filename))
|
||||
_, errs := ParseFile(filepath.Join("testdata", c.filename), false)
|
||||
require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename)
|
||||
require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ groups:
|
|||
}
|
||||
|
||||
for _, tst := range tests {
|
||||
rgs, errs := Parse([]byte(tst.ruleString))
|
||||
rgs, errs := Parse([]byte(tst.ruleString), false)
|
||||
require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
|
||||
passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0)
|
||||
require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
|
||||
|
@ -206,7 +206,7 @@ groups:
|
|||
annotations:
|
||||
summary: "Instance {{ $labels.instance }} up"
|
||||
`
|
||||
_, errs := Parse([]byte(group))
|
||||
_, errs := Parse([]byte(group), false)
|
||||
require.Len(t, errs, 2, "Expected two errors")
|
||||
var err00 *Error
|
||||
require.ErrorAs(t, errs[0], &err00)
|
||||
|
|
|
@ -177,61 +177,63 @@ func (p *NHCBParser) CreatedTimestamp() *int64 {
|
|||
}
|
||||
|
||||
func (p *NHCBParser) Next() (Entry, error) {
|
||||
if p.state == stateEmitting {
|
||||
p.state = stateStart
|
||||
if p.entry == EntrySeries {
|
||||
isNHCB := p.handleClassicHistogramSeries(p.lset)
|
||||
if isNHCB && !p.keepClassicHistograms {
|
||||
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
|
||||
return p.Next()
|
||||
for {
|
||||
if p.state == stateEmitting {
|
||||
p.state = stateStart
|
||||
if p.entry == EntrySeries {
|
||||
isNHCB := p.handleClassicHistogramSeries(p.lset)
|
||||
if isNHCB && !p.keepClassicHistograms {
|
||||
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
|
||||
continue
|
||||
}
|
||||
}
|
||||
return p.entry, p.err
|
||||
}
|
||||
return p.entry, p.err
|
||||
}
|
||||
|
||||
p.entry, p.err = p.parser.Next()
|
||||
if p.err != nil {
|
||||
if errors.Is(p.err, io.EOF) && p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
return EntryInvalid, p.err
|
||||
}
|
||||
switch p.entry {
|
||||
case EntrySeries:
|
||||
p.bytes, p.ts, p.value = p.parser.Series()
|
||||
p.metricString = p.parser.Metric(&p.lset)
|
||||
// Check the label set to see if we can continue or need to emit the NHCB.
|
||||
var isNHCB bool
|
||||
if p.compareLabels() {
|
||||
// Labels differ. Check if we can emit the NHCB.
|
||||
if p.processNHCB() {
|
||||
p.entry, p.err = p.parser.Next()
|
||||
if p.err != nil {
|
||||
if errors.Is(p.err, io.EOF) && p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
isNHCB = p.handleClassicHistogramSeries(p.lset)
|
||||
} else {
|
||||
// Labels are the same. Check if after an exponential histogram.
|
||||
if p.lastHistogramExponential {
|
||||
isNHCB = false
|
||||
} else {
|
||||
isNHCB = p.handleClassicHistogramSeries(p.lset)
|
||||
}
|
||||
return EntryInvalid, p.err
|
||||
}
|
||||
if isNHCB && !p.keepClassicHistograms {
|
||||
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
|
||||
return p.Next()
|
||||
switch p.entry {
|
||||
case EntrySeries:
|
||||
p.bytes, p.ts, p.value = p.parser.Series()
|
||||
p.metricString = p.parser.Metric(&p.lset)
|
||||
// Check the label set to see if we can continue or need to emit the NHCB.
|
||||
var isNHCB bool
|
||||
if p.compareLabels() {
|
||||
// Labels differ. Check if we can emit the NHCB.
|
||||
if p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
isNHCB = p.handleClassicHistogramSeries(p.lset)
|
||||
} else {
|
||||
// Labels are the same. Check if after an exponential histogram.
|
||||
if p.lastHistogramExponential {
|
||||
isNHCB = false
|
||||
} else {
|
||||
isNHCB = p.handleClassicHistogramSeries(p.lset)
|
||||
}
|
||||
}
|
||||
if isNHCB && !p.keepClassicHistograms {
|
||||
// Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms.
|
||||
continue
|
||||
}
|
||||
return p.entry, p.err
|
||||
case EntryHistogram:
|
||||
p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
|
||||
p.metricString = p.parser.Metric(&p.lset)
|
||||
p.storeExponentialLabels()
|
||||
case EntryType:
|
||||
p.bName, p.typ = p.parser.Type()
|
||||
}
|
||||
if p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
return p.entry, p.err
|
||||
case EntryHistogram:
|
||||
p.bytes, p.ts, p.h, p.fh = p.parser.Histogram()
|
||||
p.metricString = p.parser.Metric(&p.lset)
|
||||
p.storeExponentialLabels()
|
||||
case EntryType:
|
||||
p.bName, p.typ = p.parser.Type()
|
||||
}
|
||||
if p.processNHCB() {
|
||||
return EntryHistogram, nil
|
||||
}
|
||||
return p.entry, p.err
|
||||
}
|
||||
|
||||
// Return true if labels have changed and we should emit the NHCB.
|
||||
|
|
|
@ -691,9 +691,15 @@ func funcLastOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNod
|
|||
|
||||
// === mad_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
if len(vals[0].(Matrix)[0].Floats) == 0 {
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
return enh.Out, nil
|
||||
}
|
||||
if len(samples.Histograms) > 0 {
|
||||
metricName := samples.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
values := make(vectorByValueHeap, 0, len(s.Floats))
|
||||
for _, f := range s.Floats {
|
||||
|
@ -705,18 +711,20 @@ func funcMadOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
|
|||
values = append(values, Sample{F: math.Abs(f.F - median)})
|
||||
}
|
||||
return quantile(0.5, values)
|
||||
}), nil
|
||||
}), annos
|
||||
}
|
||||
|
||||
// === max_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
if len(vals[0].(Matrix)[0].Floats) == 0 {
|
||||
// TODO(beorn7): The passed values only contain
|
||||
// histograms. max_over_time ignores histograms for now. If
|
||||
// there are only histograms, we have to return without adding
|
||||
// anything to enh.Out.
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
return enh.Out, nil
|
||||
}
|
||||
if len(samples.Histograms) > 0 {
|
||||
metricName := samples.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
maxVal := s.Floats[0].F
|
||||
for _, f := range s.Floats {
|
||||
|
@ -725,18 +733,20 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
|
|||
}
|
||||
}
|
||||
return maxVal
|
||||
}), nil
|
||||
}), annos
|
||||
}
|
||||
|
||||
// === min_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
if len(vals[0].(Matrix)[0].Floats) == 0 {
|
||||
// TODO(beorn7): The passed values only contain
|
||||
// histograms. min_over_time ignores histograms for now. If
|
||||
// there are only histograms, we have to return without adding
|
||||
// anything to enh.Out.
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
return enh.Out, nil
|
||||
}
|
||||
if len(samples.Histograms) > 0 {
|
||||
metricName := samples.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
minVal := s.Floats[0].F
|
||||
for _, f := range s.Floats {
|
||||
|
@ -745,7 +755,7 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode
|
|||
}
|
||||
}
|
||||
return minVal
|
||||
}), nil
|
||||
}), annos
|
||||
}
|
||||
|
||||
// === sum_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
|
@ -794,10 +804,6 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
|
|||
q := vals[0].(Vector)[0].F
|
||||
el := vals[1].(Matrix)[0]
|
||||
if len(el.Floats) == 0 {
|
||||
// TODO(beorn7): The passed values only contain
|
||||
// histograms. quantile_over_time ignores histograms for now. If
|
||||
// there are only histograms, we have to return without adding
|
||||
// anything to enh.Out.
|
||||
return enh.Out, nil
|
||||
}
|
||||
|
||||
|
@ -805,7 +811,10 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
|
|||
if math.IsNaN(q) || q < 0 || q > 1 {
|
||||
annos.Add(annotations.NewInvalidQuantileWarning(q, args[0].PositionRange()))
|
||||
}
|
||||
|
||||
if len(el.Histograms) > 0 {
|
||||
metricName := el.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInAggregationInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
values := make(vectorByValueHeap, 0, len(el.Floats))
|
||||
for _, f := range el.Floats {
|
||||
values = append(values, Sample{F: f.F})
|
||||
|
@ -815,13 +824,15 @@ func funcQuantileOverTime(vals []parser.Value, args parser.Expressions, enh *Eva
|
|||
|
||||
// === stddev_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
if len(vals[0].(Matrix)[0].Floats) == 0 {
|
||||
// TODO(beorn7): The passed values only contain
|
||||
// histograms. stddev_over_time ignores histograms for now. If
|
||||
// there are only histograms, we have to return without adding
|
||||
// anything to enh.Out.
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
return enh.Out, nil
|
||||
}
|
||||
if len(samples.Histograms) > 0 {
|
||||
metricName := samples.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
var count float64
|
||||
var mean, cMean float64
|
||||
|
@ -833,18 +844,20 @@ func funcStddevOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN
|
|||
aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
|
||||
}
|
||||
return math.Sqrt((aux + cAux) / count)
|
||||
}), nil
|
||||
}), annos
|
||||
}
|
||||
|
||||
// === stdvar_over_time(Matrix parser.ValueTypeMatrix) (Vector, Annotations) ===
|
||||
func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
|
||||
if len(vals[0].(Matrix)[0].Floats) == 0 {
|
||||
// TODO(beorn7): The passed values only contain
|
||||
// histograms. stdvar_over_time ignores histograms for now. If
|
||||
// there are only histograms, we have to return without adding
|
||||
// anything to enh.Out.
|
||||
samples := vals[0].(Matrix)[0]
|
||||
var annos annotations.Annotations
|
||||
if len(samples.Floats) == 0 {
|
||||
return enh.Out, nil
|
||||
}
|
||||
if len(samples.Histograms) > 0 {
|
||||
metricName := samples.Metric.Get(labels.MetricName)
|
||||
annos.Add(annotations.NewHistogramIgnoredInMixedRangeInfo(metricName, args[0].PositionRange()))
|
||||
}
|
||||
return aggrOverTime(vals, enh, func(s Series) float64 {
|
||||
var count float64
|
||||
var mean, cMean float64
|
||||
|
@ -856,7 +869,7 @@ func funcStdvarOverTime(vals []parser.Value, args parser.Expressions, enh *EvalN
|
|||
aux, cAux = kahanSumInc(delta*(f.F-(mean+cMean)), aux, cAux)
|
||||
}
|
||||
return (aux + cAux) / count
|
||||
}), nil
|
||||
}), annos
|
||||
}
|
||||
|
||||
// === absent(Vector parser.ValueTypeVector) (Vector, Annotations) ===
|
||||
|
|
|
@ -363,17 +363,18 @@ grouping_label_list:
|
|||
grouping_label : maybe_label
|
||||
{
|
||||
if !model.LabelName($1.Val).IsValid() {
|
||||
yylex.(*parser).unexpected("grouping opts", "label")
|
||||
yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", $1.Val)
|
||||
}
|
||||
$$ = $1
|
||||
}
|
||||
| STRING {
|
||||
if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() {
|
||||
yylex.(*parser).unexpected("grouping opts", "label")
|
||||
unquoted := yylex.(*parser).unquoteString($1.Val)
|
||||
if !model.LabelName(unquoted).IsValid() {
|
||||
yylex.(*parser).addParseErrf($1.PositionRange(),"invalid label name for grouping: %q", unquoted)
|
||||
}
|
||||
$$ = $1
|
||||
$$.Pos++
|
||||
$$.Val = yylex.(*parser).unquoteString($$.Val)
|
||||
$$.Val = unquoted
|
||||
}
|
||||
| error
|
||||
{ yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} }
|
||||
|
@ -487,7 +488,7 @@ matrix_selector : expr LEFT_BRACKET number_duration_literal RIGHT_BRACKET
|
|||
|
||||
if errMsg != ""{
|
||||
errRange := mergeRanges(&$2, &$4)
|
||||
yylex.(*parser).addParseErrf(errRange, errMsg)
|
||||
yylex.(*parser).addParseErrf(errRange, "%s", errMsg)
|
||||
}
|
||||
|
||||
numLit, _ := $3.(*NumberLiteral)
|
||||
|
|
|
@ -1259,19 +1259,20 @@ yydefault:
|
|||
yyDollar = yyS[yypt-1 : yypt+1]
|
||||
{
|
||||
if !model.LabelName(yyDollar[1].item.Val).IsValid() {
|
||||
yylex.(*parser).unexpected("grouping opts", "label")
|
||||
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", yyDollar[1].item.Val)
|
||||
}
|
||||
yyVAL.item = yyDollar[1].item
|
||||
}
|
||||
case 59:
|
||||
yyDollar = yyS[yypt-1 : yypt+1]
|
||||
{
|
||||
if !model.LabelName(yylex.(*parser).unquoteString(yyDollar[1].item.Val)).IsValid() {
|
||||
yylex.(*parser).unexpected("grouping opts", "label")
|
||||
unquoted := yylex.(*parser).unquoteString(yyDollar[1].item.Val)
|
||||
if !model.LabelName(unquoted).IsValid() {
|
||||
yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid label name for grouping: %q", unquoted)
|
||||
}
|
||||
yyVAL.item = yyDollar[1].item
|
||||
yyVAL.item.Pos++
|
||||
yyVAL.item.Val = yylex.(*parser).unquoteString(yyVAL.item.Val)
|
||||
yyVAL.item.Val = unquoted
|
||||
}
|
||||
case 60:
|
||||
yyDollar = yyS[yypt-1 : yypt+1]
|
||||
|
@ -1384,7 +1385,7 @@ yydefault:
|
|||
|
||||
if errMsg != "" {
|
||||
errRange := mergeRanges(&yyDollar[2].item, &yyDollar[4].item)
|
||||
yylex.(*parser).addParseErrf(errRange, errMsg)
|
||||
yylex.(*parser).addParseErrf(errRange, "%s", errMsg)
|
||||
}
|
||||
|
||||
numLit, _ := yyDollar[3].node.(*NumberLiteral)
|
||||
|
|
|
@ -22,7 +22,7 @@ Each test file contains a series of commands. There are three kinds of commands:
|
|||
|
||||
* `load`
|
||||
* `clear`
|
||||
* `eval`
|
||||
* `eval` (including the variants `eval_fail`, `eval_warn`, `eval_info`, and `eval_ordered`)
|
||||
|
||||
Each command is executed in the order given in the file.
|
||||
|
||||
|
@ -50,12 +50,12 @@ load 1m
|
|||
my_metric{env="prod"} 5 2+3x2 _ stale {{schema:1 sum:3 count:22 buckets:[5 10 7]}}
|
||||
```
|
||||
|
||||
...will create a single series with labels `my_metric{env="prod"}`, with the following points:
|
||||
… will create a single series with labels `my_metric{env="prod"}`, with the following points:
|
||||
|
||||
* t=0: value is 5
|
||||
* t=1m: value is 2
|
||||
* t=2m: value is 5
|
||||
* t=3m: value is 7
|
||||
* t=3m: value is 8
|
||||
* t=4m: no point
|
||||
* t=5m: stale marker
|
||||
* t=6m: native histogram with schema 1, sum -3, count 22 and bucket counts 5, 10 and 7
|
||||
|
@ -74,6 +74,7 @@ When loading a batch of classic histogram float series, you can optionally appen
|
|||
## `eval` command
|
||||
|
||||
`eval` runs a query against the test environment and asserts that the result is as expected.
|
||||
It requires the query to succeed without any (info or warn) annotations.
|
||||
|
||||
Both instant and range queries are supported.
|
||||
|
||||
|
@ -110,11 +111,18 @@ eval range from 0 to 3m step 1m sum by (env) (my_metric)
|
|||
{env="test"} 10 20 30 45
|
||||
```
|
||||
|
||||
Instant queries also support asserting that the series are returned in exactly the order specified: use `eval_ordered instant ...` instead of `eval instant ...`.
|
||||
This is not supported for range queries.
|
||||
To assert that a query succeeds with an info or warn annotation, use the
|
||||
`eval_info` or `eval_warn` commands, respectively.
|
||||
|
||||
It is also possible to test that queries fail: use `eval_fail instant ...` or `eval_fail range ...`.
|
||||
`eval_fail` optionally takes an expected error message string or regexp to assert that the error message is as expected.
|
||||
Instant queries also support asserting that the series are returned in exactly
|
||||
the order specified: use `eval_ordered instant ...` instead of `eval instant
|
||||
...`. `eval_ordered` ignores any annotations. The assertion always fails for
|
||||
matrix results.
|
||||
|
||||
To assert that a query fails, use the `eval_fail` command. `eval_fail` does not
|
||||
expect any result lines. Instead, it optionally accepts an expected error
|
||||
message string or regular expression to assert that the error message is as
|
||||
expected.
|
||||
|
||||
For example:
|
||||
|
||||
|
|
|
@ -39,6 +39,7 @@ import (
|
|||
"github.com/prometheus/prometheus/promql/parser/posrange"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/util/almost"
|
||||
"github.com/prometheus/prometheus/util/annotations"
|
||||
"github.com/prometheus/prometheus/util/convertnhcb"
|
||||
"github.com/prometheus/prometheus/util/teststorage"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
|
@ -692,6 +693,24 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
|
|||
ev.expected[h] = entry{pos: pos, vals: vals}
|
||||
}
|
||||
|
||||
// checkAnnotations asserts if the annotations match the expectations.
|
||||
func (ev *evalCmd) checkAnnotations(expr string, annos annotations.Annotations) error {
|
||||
countWarnings, countInfo := annos.CountWarningsAndInfo()
|
||||
switch {
|
||||
case ev.ordered:
|
||||
// Ignore annotations if testing for order.
|
||||
case !ev.warn && countWarnings > 0:
|
||||
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
|
||||
case ev.warn && countWarnings == 0:
|
||||
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", expr, ev.line)
|
||||
case !ev.info && countInfo > 0:
|
||||
return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", expr, ev.line, annos.AsErrors())
|
||||
case ev.info && countInfo == 0:
|
||||
return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", expr, ev.line)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// compareResult compares the result value with the defined expectation.
|
||||
func (ev *evalCmd) compareResult(result parser.Value) error {
|
||||
switch val := result.(type) {
|
||||
|
@ -1131,6 +1150,7 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||
}
|
||||
defer q.Close()
|
||||
res := q.Exec(t.context)
|
||||
if res.Err != nil {
|
||||
if cmd.fail {
|
||||
|
@ -1142,18 +1162,9 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
|
|||
if res.Err == nil && cmd.fail {
|
||||
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
|
||||
}
|
||||
countWarnings, countInfo := res.Warnings.CountWarningsAndInfo()
|
||||
switch {
|
||||
case !cmd.warn && countWarnings > 0:
|
||||
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
|
||||
case cmd.warn && countWarnings == 0:
|
||||
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
|
||||
case !cmd.info && countInfo > 0:
|
||||
return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings)
|
||||
case cmd.info && countInfo == 0:
|
||||
return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
|
||||
if err := cmd.checkAnnotations(cmd.expr, res.Warnings); err != nil {
|
||||
return err
|
||||
}
|
||||
defer q.Close()
|
||||
|
||||
if err := cmd.compareResult(res.Value); err != nil {
|
||||
return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err)
|
||||
|
@ -1196,16 +1207,8 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
|
|||
if res.Err == nil && cmd.fail {
|
||||
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||
}
|
||||
countWarnings, countInfo := res.Warnings.CountWarningsAndInfo()
|
||||
switch {
|
||||
case !cmd.warn && countWarnings > 0:
|
||||
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
|
||||
case cmd.warn && countWarnings == 0:
|
||||
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||
case !cmd.info && countInfo > 0:
|
||||
return fmt.Errorf("unexpected info annotations evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
|
||||
case cmd.info && countInfo == 0:
|
||||
return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line)
|
||||
if err := cmd.checkAnnotations(iq.expr, res.Warnings); err != nil {
|
||||
return err
|
||||
}
|
||||
err = cmd.compareResult(res.Value)
|
||||
if err != nil {
|
||||
|
@ -1218,11 +1221,11 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
|
|||
if err != nil {
|
||||
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
|
||||
}
|
||||
defer q.Close()
|
||||
rangeRes := q.Exec(t.context)
|
||||
if rangeRes.Err != nil {
|
||||
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
|
||||
}
|
||||
defer q.Close()
|
||||
if cmd.ordered {
|
||||
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
|
||||
return nil
|
||||
|
@ -1416,8 +1419,8 @@ func (ll *LazyLoader) appendTill(ts int64) error {
|
|||
|
||||
// WithSamplesTill loads the samples till given timestamp and executes the given function.
|
||||
func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) {
|
||||
tsMilli := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond
|
||||
fn(ll.appendTill(int64(tsMilli)))
|
||||
till := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond
|
||||
fn(ll.appendTill(int64(till)))
|
||||
}
|
||||
|
||||
// QueryEngine returns the LazyLoader's query engine.
|
||||
|
|
|
@ -353,6 +353,44 @@ eval_ordered instant at 50m sort(http_requests)
|
|||
`,
|
||||
expectedError: `error in eval sort(http_requests) (line 10): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`,
|
||||
},
|
||||
"instant query with results expected to match provided order, result is in expected order and info annotation is ignored": {
|
||||
input: testData + `
|
||||
eval_ordered instant at 50m sort(rate(http_requests[10m]))
|
||||
{group="production", instance="0", job="api-server"} 0.03333333333333333
|
||||
{group="production", instance="1", job="api-server"} 0.06666666666666667
|
||||
{group="canary", instance="0", job="api-server"} 0.1
|
||||
{group="canary", instance="1", job="api-server"} 0.13333333333333333
|
||||
`,
|
||||
},
|
||||
"instant query with expected info annotation": {
|
||||
input: testData + `
|
||||
eval_info instant at 50m sort(rate(http_requests[10m]))
|
||||
{group="production", instance="0", job="api-server"} 0.03333333333333333
|
||||
{group="production", instance="1", job="api-server"} 0.06666666666666667
|
||||
{group="canary", instance="0", job="api-server"} 0.1
|
||||
{group="canary", instance="1", job="api-server"} 0.13333333333333333
|
||||
`,
|
||||
},
|
||||
"instant query with unexpected info annotation": {
|
||||
input: testData + `
|
||||
eval instant at 50m sort(rate(http_requests[10m]))
|
||||
{group="production", instance="0", job="api-server"} 0.03333333333333333
|
||||
{group="production", instance="1", job="api-server"} 0.06666666666666667
|
||||
{group="canary", instance="0", job="api-server"} 0.1
|
||||
{group="canary", instance="1", job="api-server"} 0.13333333333333333
|
||||
`,
|
||||
expectedError: `unexpected info annotations evaluating query "sort(rate(http_requests[10m]))" (line 10): [PromQL info: metric might not be a counter, name does not end in _total/_sum/_count/_bucket: "http_requests"]`,
|
||||
},
|
||||
"instant query with unexpectedly missing warn annotation": {
|
||||
input: testData + `
|
||||
eval_warn instant at 50m sort(rate(http_requests[10m]))
|
||||
{group="production", instance="0", job="api-server"} 0.03333333333333333
|
||||
{group="production", instance="1", job="api-server"} 0.06666666666666667
|
||||
{group="canary", instance="0", job="api-server"} 0.1
|
||||
{group="canary", instance="1", job="api-server"} 0.13333333333333333
|
||||
`,
|
||||
expectedError: `expected warnings evaluating query "sort(rate(http_requests[10m]))" (line 10) but got none`,
|
||||
},
|
||||
"instant query with invalid timestamp": {
|
||||
input: `eval instant at abc123 vector(0)`,
|
||||
expectedError: `error in eval vector(0) (line 1): invalid timestamp definition "abc123": not a valid duration string: "abc123"`,
|
||||
|
|
78
promql/promqltest/testdata/functions.test
vendored
78
promql/promqltest/testdata/functions.test
vendored
|
@ -929,35 +929,58 @@ eval instant at 1m avg_over_time(metric[2m])
|
|||
# Tests for stddev_over_time and stdvar_over_time.
|
||||
clear
|
||||
load 10s
|
||||
metric 0 8 8 2 3
|
||||
metric 0 8 8 2 3
|
||||
metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5
|
||||
metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}}
|
||||
|
||||
eval instant at 1m stdvar_over_time(metric[2m])
|
||||
{} 10.56
|
||||
{} 10.56
|
||||
|
||||
eval instant at 1m stddev_over_time(metric[2m])
|
||||
{} 3.249615
|
||||
{} 3.249615
|
||||
|
||||
eval instant at 1m stddev_over_time((metric[2m]))
|
||||
{} 3.249615
|
||||
{} 3.249615
|
||||
|
||||
# Tests for stddev_over_time and stdvar_over_time with histograms.
|
||||
eval instant at 1m stddev_over_time(metric_histogram{type="only_histogram"}[2m])
|
||||
#empty
|
||||
|
||||
eval_info instant at 1m stddev_over_time(metric_histogram{type="mix"}[2m])
|
||||
{type="mix"} 0
|
||||
|
||||
eval instant at 1m stdvar_over_time(metric_histogram{type="only_histogram"}[2m])
|
||||
#empty
|
||||
|
||||
eval_info instant at 1m stdvar_over_time(metric_histogram{type="mix"}[2m])
|
||||
{type="mix"} 0
|
||||
|
||||
# Tests for stddev_over_time and stdvar_over_time #4927.
|
||||
clear
|
||||
load 10s
|
||||
metric 1.5990505637277868 1.5990505637277868 1.5990505637277868
|
||||
metric 1.5990505637277868 1.5990505637277868 1.5990505637277868
|
||||
|
||||
eval instant at 1m stdvar_over_time(metric[1m])
|
||||
{} 0
|
||||
{} 0
|
||||
|
||||
eval instant at 1m stddev_over_time(metric[1m])
|
||||
{} 0
|
||||
{} 0
|
||||
|
||||
# Tests for mad_over_time.
|
||||
clear
|
||||
load 10s
|
||||
metric 4 6 2 1 999 1 2
|
||||
metric 4 6 2 1 999 1 2
|
||||
metric_histogram{type="only_histogram"} {{schema:1 sum:2 count:3}}x5
|
||||
metric_histogram{type="mix"} 1 1 1 {{schema:1 sum:2 count:3}} {{schema:1 sum:2 count:3}}
|
||||
|
||||
eval instant at 70s mad_over_time(metric[70s])
|
||||
{} 1
|
||||
{} 1
|
||||
|
||||
eval instant at 70s mad_over_time(metric_histogram{type="only_histogram"}[70s])
|
||||
#empty
|
||||
|
||||
eval_info instant at 70s mad_over_time(metric_histogram{type="mix"}[70s])
|
||||
{type="mix"} 0
|
||||
|
||||
# Tests for quantile_over_time
|
||||
clear
|
||||
|
@ -966,6 +989,8 @@ load 10s
|
|||
data{test="two samples"} 0 1
|
||||
data{test="three samples"} 0 1 2
|
||||
data{test="uneven samples"} 0 1 4
|
||||
data_histogram{test="only histogram samples"} {{schema:0 sum:1 count:2}}x4
|
||||
data_histogram{test="mix samples"} 0 1 2 {{schema:0 sum:1 count:2}}x2
|
||||
|
||||
eval instant at 1m quantile_over_time(0, data[2m])
|
||||
{test="two samples"} 0
|
||||
|
@ -1007,6 +1032,12 @@ eval_warn instant at 1m (quantile_over_time(2, (data[2m])))
|
|||
{test="three samples"} +Inf
|
||||
{test="uneven samples"} +Inf
|
||||
|
||||
eval instant at 1m quantile_over_time(0.5, data_histogram{test="only histogram samples"}[2m])
|
||||
#empty
|
||||
|
||||
eval_info instant at 1m quantile_over_time(0.5, data_histogram{test="mix samples"}[2m])
|
||||
{test="mix samples"} 1
|
||||
|
||||
clear
|
||||
|
||||
# Test time-related functions.
|
||||
|
@ -1120,15 +1151,17 @@ load 5m
|
|||
|
||||
eval_fail instant at 0m changes({__name__=~'testmetric1|testmetric2'}[5m])
|
||||
|
||||
# Tests for *_over_time
|
||||
clear
|
||||
|
||||
# Tests for *_over_time
|
||||
load 10s
|
||||
data{type="numbers"} 2 0 3
|
||||
data{type="some_nan"} 2 0 NaN
|
||||
data{type="some_nan2"} 2 NaN 1
|
||||
data{type="some_nan3"} NaN 0 1
|
||||
data{type="only_nan"} NaN NaN NaN
|
||||
data_histogram{type="only_histogram"} {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}} {{schema:0 sum:3 count:4}}
|
||||
data_histogram{type="mix_samples"} 0 1 {{schema:0 sum:1 count:2}} {{schema:0 sum:2 count:3}}
|
||||
|
||||
eval instant at 1m min_over_time(data[2m])
|
||||
{type="numbers"} 0
|
||||
|
@ -1137,6 +1170,12 @@ eval instant at 1m min_over_time(data[2m])
|
|||
{type="some_nan3"} 0
|
||||
{type="only_nan"} NaN
|
||||
|
||||
eval instant at 1m min_over_time(data_histogram{type="only_histogram"}[2m])
|
||||
#empty
|
||||
|
||||
eval_info instant at 1m min_over_time(data_histogram{type="mix_samples"}[2m])
|
||||
{type="mix_samples"} 0
|
||||
|
||||
eval instant at 1m max_over_time(data[2m])
|
||||
{type="numbers"} 3
|
||||
{type="some_nan"} 2
|
||||
|
@ -1144,12 +1183,29 @@ eval instant at 1m max_over_time(data[2m])
|
|||
{type="some_nan3"} 1
|
||||
{type="only_nan"} NaN
|
||||
|
||||
eval instant at 1m last_over_time(data[2m])
|
||||
eval instant at 1m max_over_time(data_histogram{type="only_histogram"}[2m])
|
||||
#empty
|
||||
|
||||
eval_info instant at 1m max_over_time(data_histogram{type="mix_samples"}[2m])
|
||||
{type="mix_samples"} 1
|
||||
|
||||
eval instant at 1m last_over_time({__name__=~"data(_histogram)?"}[2m])
|
||||
data{type="numbers"} 3
|
||||
data{type="some_nan"} NaN
|
||||
data{type="some_nan2"} 1
|
||||
data{type="some_nan3"} 1
|
||||
data{type="only_nan"} NaN
|
||||
data_histogram{type="only_histogram"} {{schema:0 sum:3 count:4}}
|
||||
data_histogram{type="mix_samples"} {{schema:0 sum:2 count:3}}
|
||||
|
||||
eval instant at 1m count_over_time({__name__=~"data(_histogram)?"}[2m])
|
||||
{type="numbers"} 3
|
||||
{type="some_nan"} 3
|
||||
{type="some_nan2"} 3
|
||||
{type="some_nan3"} 3
|
||||
{type="only_nan"} 3
|
||||
{type="only_histogram"} 3
|
||||
{type="mix_samples"} 4
|
||||
|
||||
clear
|
||||
|
||||
|
|
|
@ -1128,6 +1128,84 @@ eval_warn range from 0 to 12m step 6m sum(metric)
|
|||
eval_warn range from 0 to 12m step 6m avg(metric)
|
||||
{} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _
|
||||
|
||||
# Test incompatible schemas with additional aggregation operators
|
||||
eval range from 0 to 12m step 6m count(metric)
|
||||
{} 2 2 3
|
||||
|
||||
eval range from 0 to 12m step 6m group(metric)
|
||||
{} 1 1 1
|
||||
|
||||
eval range from 0 to 12m step 6m count(limitk(1, metric))
|
||||
{} 1 1 1
|
||||
|
||||
eval range from 0 to 12m step 6m limitk(3, metric)
|
||||
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
|
||||
metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval range from 0 to 12m step 6m limit_ratio(1, metric)
|
||||
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}}
|
||||
metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
# Test incompatible schemas with and/or
|
||||
eval range from 0 to 12m step 6m metric{series="1"} and ignoring(series) metric{series="2"}
|
||||
metric{series="1"} _ _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval range from 0 to 12m step 6m metric{series="1"} or ignoring(series) metric{series="2"}
|
||||
metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ _
|
||||
|
||||
# Test incompatible schemas with arithmetic binary operators
|
||||
eval_warn range from 0 to 12m step 6m metric{series="2"} + ignoring (series) metric{series="3"}
|
||||
|
||||
eval_warn range from 0 to 12m step 6m metric{series="2"} - ignoring (series) metric{series="3"}
|
||||
|
||||
clear
|
||||
|
||||
# Test incompatible schemas with comparison binary operators
|
||||
load 6m
|
||||
metric1 {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
metric2 {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval range from 0 to 6m step 6m metric1 == metric2
|
||||
metric1{} _ {{schema:-53 count:1 sum:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval range from 0 to 6m step 6m metric1 != metric2
|
||||
metric1{} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _
|
||||
|
||||
eval_info range from 0 to 6m step 6m metric2 > metric2
|
||||
|
||||
clear
|
||||
|
||||
load 6m
|
||||
nhcb_metric {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval_warn instant at 12m sum_over_time(nhcb_metric[13m])
|
||||
|
||||
eval_warn instant at 12m avg_over_time(nhcb_metric[13m])
|
||||
|
||||
eval instant at 12m last_over_time(nhcb_metric[13m])
|
||||
nhcb_metric{} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}}
|
||||
|
||||
eval instant at 12m count_over_time(nhcb_metric[13m])
|
||||
{} 3
|
||||
|
||||
eval instant at 12m present_over_time(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
eval instant at 12m changes(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
eval_warn instant at 12m delta(nhcb_metric[13m])
|
||||
|
||||
eval_warn instant at 12m increase(nhcb_metric[13m])
|
||||
|
||||
eval_warn instant at 12m rate(nhcb_metric[13m])
|
||||
|
||||
eval instant at 12m resets(nhcb_metric[13m])
|
||||
{} 1
|
||||
|
||||
clear
|
||||
|
||||
load 1m
|
||||
|
|
|
@ -143,8 +143,9 @@ type AlertingRule struct {
|
|||
|
||||
logger *slog.Logger
|
||||
|
||||
noDependentRules *atomic.Bool
|
||||
noDependencyRules *atomic.Bool
|
||||
dependenciesMutex sync.RWMutex
|
||||
dependentRules []Rule
|
||||
dependencyRules []Rule
|
||||
}
|
||||
|
||||
// NewAlertingRule constructs a new AlertingRule.
|
||||
|
@ -171,8 +172,6 @@ func NewAlertingRule(
|
|||
evaluationTimestamp: atomic.NewTime(time.Time{}),
|
||||
evaluationDuration: atomic.NewDuration(0),
|
||||
lastError: atomic.NewError(nil),
|
||||
noDependentRules: atomic.NewBool(false),
|
||||
noDependencyRules: atomic.NewBool(false),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -316,20 +315,54 @@ func (r *AlertingRule) Restored() bool {
|
|||
return r.restored.Load()
|
||||
}
|
||||
|
||||
func (r *AlertingRule) SetNoDependentRules(noDependentRules bool) {
|
||||
r.noDependentRules.Store(noDependentRules)
|
||||
func (r *AlertingRule) SetDependentRules(dependents []Rule) {
|
||||
r.dependenciesMutex.Lock()
|
||||
defer r.dependenciesMutex.Unlock()
|
||||
|
||||
r.dependentRules = make([]Rule, len(dependents))
|
||||
copy(r.dependentRules, dependents)
|
||||
}
|
||||
|
||||
func (r *AlertingRule) NoDependentRules() bool {
|
||||
return r.noDependentRules.Load()
|
||||
r.dependenciesMutex.RLock()
|
||||
defer r.dependenciesMutex.RUnlock()
|
||||
|
||||
if r.dependentRules == nil {
|
||||
return false // We don't know if there are dependent rules.
|
||||
}
|
||||
|
||||
return len(r.dependentRules) == 0
|
||||
}
|
||||
|
||||
func (r *AlertingRule) SetNoDependencyRules(noDependencyRules bool) {
|
||||
r.noDependencyRules.Store(noDependencyRules)
|
||||
func (r *AlertingRule) DependentRules() []Rule {
|
||||
r.dependenciesMutex.RLock()
|
||||
defer r.dependenciesMutex.RUnlock()
|
||||
return r.dependentRules
|
||||
}
|
||||
|
||||
func (r *AlertingRule) SetDependencyRules(dependencies []Rule) {
|
||||
r.dependenciesMutex.Lock()
|
||||
defer r.dependenciesMutex.Unlock()
|
||||
|
||||
r.dependencyRules = make([]Rule, len(dependencies))
|
||||
copy(r.dependencyRules, dependencies)
|
||||
}
|
||||
|
||||
func (r *AlertingRule) NoDependencyRules() bool {
|
||||
return r.noDependencyRules.Load()
|
||||
r.dependenciesMutex.RLock()
|
||||
defer r.dependenciesMutex.RUnlock()
|
||||
|
||||
if r.dependencyRules == nil {
|
||||
return false // We don't know if there are dependency rules.
|
||||
}
|
||||
|
||||
return len(r.dependencyRules) == 0
|
||||
}
|
||||
|
||||
func (r *AlertingRule) DependencyRules() []Rule {
|
||||
r.dependenciesMutex.RLock()
|
||||
defer r.dependenciesMutex.RUnlock()
|
||||
return r.dependencyRules
|
||||
}
|
||||
|
||||
// resolvedRetention is the duration for which a resolved alert instance
|
||||
|
|
|
@ -998,7 +998,9 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
|
|||
require.Equal(t, detail, NewRuleDetail(rule))
|
||||
}
|
||||
|
||||
func TestAlertingRule_SetNoDependentRules(t *testing.T) {
|
||||
func TestAlertingRule_SetDependentRules(t *testing.T) {
|
||||
dependentRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
|
||||
|
||||
rule := NewAlertingRule(
|
||||
"test",
|
||||
&parser.NumberLiteral{Val: 1},
|
||||
|
@ -1012,14 +1014,18 @@ func TestAlertingRule_SetNoDependentRules(t *testing.T) {
|
|||
)
|
||||
require.False(t, rule.NoDependentRules())
|
||||
|
||||
rule.SetNoDependentRules(false)
|
||||
rule.SetDependentRules([]Rule{dependentRule})
|
||||
require.False(t, rule.NoDependentRules())
|
||||
require.Equal(t, []Rule{dependentRule}, rule.DependentRules())
|
||||
|
||||
rule.SetNoDependentRules(true)
|
||||
rule.SetDependentRules([]Rule{})
|
||||
require.True(t, rule.NoDependentRules())
|
||||
require.Empty(t, rule.DependentRules())
|
||||
}
|
||||
|
||||
func TestAlertingRule_SetNoDependencyRules(t *testing.T) {
|
||||
func TestAlertingRule_SetDependencyRules(t *testing.T) {
|
||||
dependencyRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
|
||||
|
||||
rule := NewAlertingRule(
|
||||
"test",
|
||||
&parser.NumberLiteral{Val: 1},
|
||||
|
@ -1033,11 +1039,13 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) {
|
|||
)
|
||||
require.False(t, rule.NoDependencyRules())
|
||||
|
||||
rule.SetNoDependencyRules(false)
|
||||
rule.SetDependencyRules([]Rule{dependencyRule})
|
||||
require.False(t, rule.NoDependencyRules())
|
||||
require.Equal(t, []Rule{dependencyRule}, rule.DependencyRules())
|
||||
|
||||
rule.SetNoDependencyRules(true)
|
||||
rule.SetDependencyRules([]Rule{})
|
||||
require.True(t, rule.NoDependencyRules())
|
||||
require.Empty(t, rule.DependencyRules())
|
||||
}
|
||||
|
||||
func TestAlertingRule_ActiveAlertsCount(t *testing.T) {
|
||||
|
|
6
rules/fixtures/alert_rule.yaml
Normal file
6
rules/fixtures/alert_rule.yaml
Normal file
|
@ -0,0 +1,6 @@
|
|||
groups:
|
||||
- name: test
|
||||
interval: 1s
|
||||
rules:
|
||||
- alert: rule1
|
||||
expr: 1 < bool 2
|
6
rules/fixtures/alert_rule1.yaml
Normal file
6
rules/fixtures/alert_rule1.yaml
Normal file
|
@ -0,0 +1,6 @@
|
|||
groups:
|
||||
- name: test2
|
||||
interval: 1s
|
||||
rules:
|
||||
- alert: rule2
|
||||
expr: 1 < bool 2
|
22
rules/fixtures/rules_chain.yaml
Normal file
22
rules/fixtures/rules_chain.yaml
Normal file
|
@ -0,0 +1,22 @@
|
|||
groups:
|
||||
- name: chain
|
||||
rules:
|
||||
# Evaluated concurrently, no dependencies
|
||||
- record: job:http_requests:rate1m
|
||||
expr: sum by (job)(rate(http_requests_total[1m]))
|
||||
- record: job:http_requests:rate5m
|
||||
expr: sum by (job)(rate(http_requests_total[1m]))
|
||||
|
||||
# Evaluated sequentially, dependents and dependencies
|
||||
- record: job1:http_requests:rate1m
|
||||
expr: job:http_requests:rate1m{job="job1"}
|
||||
- record: job1_cluster1:http_requests:rate1m
|
||||
expr: job1:http_requests:rate1m{cluster="cluster1"}
|
||||
|
||||
# Evaluated concurrently, no dependents
|
||||
- record: job1_cluster2:http_requests:rate1m
|
||||
expr: job1:http_requests:rate1m{cluster="cluster2"}
|
||||
- record: job1_cluster1_namespace1:http_requests:rate1m
|
||||
expr: job1_cluster1:http_requests:rate1m{namespace="namespace1"}
|
||||
- record: job1_cluster1_namespace2:http_requests:rate1m
|
||||
expr: job1_cluster1:http_requests:rate1m{namespace="namespace2"}
|
21
rules/fixtures/rules_multiple_dependents_on_base.yaml
Normal file
21
rules/fixtures/rules_multiple_dependents_on_base.yaml
Normal file
|
@ -0,0 +1,21 @@
|
|||
groups:
|
||||
- name: concurrent_dependents
|
||||
rules:
|
||||
# 3 dependents on the same base
|
||||
- record: job:http_requests:rate1m
|
||||
expr: sum by (job)(rate(http_requests_total[1m]))
|
||||
- record: job1:http_requests:rate1m
|
||||
expr: job:http_requests:rate1m{job="job1"}
|
||||
- record: job2:http_requests:rate1m
|
||||
expr: job:http_requests:rate1m{job="job2"}
|
||||
- record: job3:http_requests:rate1m
|
||||
expr: job:http_requests:rate1m{job="job3"}
|
||||
# another 3 dependents on the same base
|
||||
- record: job:http_requests:rate5m
|
||||
expr: sum by (job)(rate(http_requests_total[5m]))
|
||||
- record: job1:http_requests:rate5m
|
||||
expr: job:http_requests:rate5m{job="job1"}
|
||||
- record: job2:http_requests:rate5m
|
||||
expr: job:http_requests:rate5m{job="job2"}
|
||||
- record: job3:http_requests:rate5m
|
||||
expr: job:http_requests:rate5m{job="job3"}
|
|
@ -6,6 +6,8 @@ groups:
|
|||
expr: sum by (job)(rate(http_requests_total[1m]))
|
||||
- record: job:http_requests:rate5m
|
||||
expr: sum by (job)(rate(http_requests_total[5m]))
|
||||
- record: job:http_requests:rate10m
|
||||
expr: sum by (job)(rate(http_requests_total[10m]))
|
||||
|
||||
# dependents
|
||||
- record: job:http_requests:rate15m
|
||||
|
@ -20,6 +22,8 @@ groups:
|
|||
expr: sum by (job)(rate(grpc_requests_total[1m]))
|
||||
- record: job:grpc_requests:rate5m
|
||||
expr: sum by (job)(rate(grpc_requests_total[5m]))
|
||||
- record: job:grpc_requests:rate10m
|
||||
expr: sum by (job)(rate(grpc_requests_total[10m]))
|
||||
|
||||
# dependents
|
||||
- record: job:grpc_requests:rate15m
|
||||
|
|
134
rules/group.go
134
rules/group.go
|
@ -74,9 +74,7 @@ type Group struct {
|
|||
// defaults to DefaultEvalIterationFunc.
|
||||
evalIterationFunc GroupEvalIterationFunc
|
||||
|
||||
// concurrencyController controls the rules evaluation concurrency.
|
||||
concurrencyController RuleConcurrencyController
|
||||
appOpts *storage.AppendOptions
|
||||
appOpts *storage.AppendOptions
|
||||
}
|
||||
|
||||
// GroupEvalIterationFunc is used to implement and extend rule group
|
||||
|
@ -126,33 +124,27 @@ func NewGroup(o GroupOptions) *Group {
|
|||
evalIterationFunc = DefaultEvalIterationFunc
|
||||
}
|
||||
|
||||
concurrencyController := opts.RuleConcurrencyController
|
||||
if concurrencyController == nil {
|
||||
concurrencyController = sequentialRuleEvalController{}
|
||||
}
|
||||
|
||||
if opts.Logger == nil {
|
||||
opts.Logger = promslog.NewNopLogger()
|
||||
}
|
||||
|
||||
return &Group{
|
||||
name: o.Name,
|
||||
file: o.File,
|
||||
interval: o.Interval,
|
||||
queryOffset: o.QueryOffset,
|
||||
limit: o.Limit,
|
||||
rules: o.Rules,
|
||||
shouldRestore: o.ShouldRestore,
|
||||
opts: opts,
|
||||
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
|
||||
done: make(chan struct{}),
|
||||
managerDone: o.done,
|
||||
terminated: make(chan struct{}),
|
||||
logger: opts.Logger.With("file", o.File, "group", o.Name),
|
||||
metrics: metrics,
|
||||
evalIterationFunc: evalIterationFunc,
|
||||
concurrencyController: concurrencyController,
|
||||
appOpts: &storage.AppendOptions{DiscardOutOfOrder: true},
|
||||
name: o.Name,
|
||||
file: o.File,
|
||||
interval: o.Interval,
|
||||
queryOffset: o.QueryOffset,
|
||||
limit: o.Limit,
|
||||
rules: o.Rules,
|
||||
shouldRestore: o.ShouldRestore,
|
||||
opts: opts,
|
||||
seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
|
||||
done: make(chan struct{}),
|
||||
managerDone: o.done,
|
||||
terminated: make(chan struct{}),
|
||||
logger: opts.Logger.With("file", o.File, "group", o.Name),
|
||||
metrics: metrics,
|
||||
evalIterationFunc: evalIterationFunc,
|
||||
appOpts: &storage.AppendOptions{DiscardOutOfOrder: true},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -310,11 +302,19 @@ func (g *Group) run(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
func (g *Group) stop() {
|
||||
func (g *Group) stopAsync() {
|
||||
close(g.done)
|
||||
}
|
||||
|
||||
func (g *Group) waitStopped() {
|
||||
<-g.terminated
|
||||
}
|
||||
|
||||
func (g *Group) stop() {
|
||||
g.stopAsync()
|
||||
g.waitStopped()
|
||||
}
|
||||
|
||||
func (g *Group) hash() uint64 {
|
||||
l := labels.New(
|
||||
labels.Label{Name: "name", Value: g.name},
|
||||
|
@ -647,25 +647,51 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
|
|||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for i, rule := range g.rules {
|
||||
select {
|
||||
case <-g.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
ctrl := g.opts.RuleConcurrencyController
|
||||
if ctrl == nil {
|
||||
ctrl = sequentialRuleEvalController{}
|
||||
}
|
||||
|
||||
if ctrl := g.concurrencyController; ctrl.Allow(ctx, g, rule) {
|
||||
wg.Add(1)
|
||||
|
||||
go eval(i, rule, func() {
|
||||
wg.Done()
|
||||
ctrl.Done(ctx)
|
||||
})
|
||||
} else {
|
||||
batches := ctrl.SplitGroupIntoBatches(ctx, g)
|
||||
if len(batches) == 0 {
|
||||
// Sequential evaluation when batches aren't set.
|
||||
// This is the behaviour without a defined RuleConcurrencyController
|
||||
for i, rule := range g.rules {
|
||||
// Check if the group has been stopped.
|
||||
select {
|
||||
case <-g.done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
eval(i, rule, nil)
|
||||
}
|
||||
} else {
|
||||
// Concurrent evaluation.
|
||||
for _, batch := range batches {
|
||||
for _, ruleIndex := range batch {
|
||||
// Check if the group has been stopped.
|
||||
select {
|
||||
case <-g.done:
|
||||
wg.Wait()
|
||||
return
|
||||
default:
|
||||
}
|
||||
rule := g.rules[ruleIndex]
|
||||
if len(batch) > 1 && ctrl.Allow(ctx, g, rule) {
|
||||
wg.Add(1)
|
||||
|
||||
go eval(ruleIndex, rule, func() {
|
||||
wg.Done()
|
||||
ctrl.Done(ctx)
|
||||
})
|
||||
} else {
|
||||
eval(ruleIndex, rule, nil)
|
||||
}
|
||||
}
|
||||
// It is important that we finish processing any rules in this current batch - before we move into the next one.
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
g.metrics.GroupSamples.WithLabelValues(GroupKey(g.File(), g.Name())).Set(samplesTotal.Load())
|
||||
g.cleanupStaleSeries(ctx, ts)
|
||||
|
@ -1034,27 +1060,25 @@ func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
|
|||
// output metric produced by another rule in its expression (i.e. as its "input").
|
||||
type dependencyMap map[Rule][]Rule
|
||||
|
||||
// dependents returns the count of rules which use the output of the given rule as one of their inputs.
|
||||
func (m dependencyMap) dependents(r Rule) int {
|
||||
return len(m[r])
|
||||
// dependents returns the rules which use the output of the given rule as one of their inputs.
|
||||
func (m dependencyMap) dependents(r Rule) []Rule {
|
||||
return m[r]
|
||||
}
|
||||
|
||||
// dependencies returns the count of rules on which the given rule is dependent for input.
|
||||
func (m dependencyMap) dependencies(r Rule) int {
|
||||
// dependencies returns the rules on which the given rule is dependent for input.
|
||||
func (m dependencyMap) dependencies(r Rule) []Rule {
|
||||
if len(m) == 0 {
|
||||
return 0
|
||||
return []Rule{}
|
||||
}
|
||||
|
||||
var count int
|
||||
for _, children := range m {
|
||||
for _, child := range children {
|
||||
if child == r {
|
||||
count++
|
||||
}
|
||||
var dependencies []Rule
|
||||
for rule, dependents := range m {
|
||||
if slices.Contains(dependents, r) {
|
||||
dependencies = append(dependencies, rule)
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
return dependencies
|
||||
}
|
||||
|
||||
// isIndependent determines whether the given rule is not dependent on another rule for its input, nor is any other rule
|
||||
|
@ -1064,7 +1088,7 @@ func (m dependencyMap) isIndependent(r Rule) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
return m.dependents(r)+m.dependencies(r) == 0
|
||||
return len(m.dependents(r)) == 0 && len(m.dependencies(r)) == 0
|
||||
}
|
||||
|
||||
// buildDependencyMap builds a data-structure which contains the relationships between rules within a group.
|
||||
|
|
115
rules/manager.go
115
rules/manager.go
|
@ -90,12 +90,13 @@ func DefaultEvalIterationFunc(ctx context.Context, g *Group, evalTimestamp time.
|
|||
|
||||
// The Manager manages recording and alerting rules.
|
||||
type Manager struct {
|
||||
opts *ManagerOptions
|
||||
groups map[string]*Group
|
||||
mtx sync.RWMutex
|
||||
block chan struct{}
|
||||
done chan struct{}
|
||||
restored bool
|
||||
opts *ManagerOptions
|
||||
groups map[string]*Group
|
||||
mtx sync.RWMutex
|
||||
block chan struct{}
|
||||
done chan struct{}
|
||||
restored bool
|
||||
restoreNewRuleGroups bool
|
||||
|
||||
logger *slog.Logger
|
||||
}
|
||||
|
@ -122,6 +123,10 @@ type ManagerOptions struct {
|
|||
ConcurrentEvalsEnabled bool
|
||||
RuleConcurrencyController RuleConcurrencyController
|
||||
RuleDependencyController RuleDependencyController
|
||||
// At present, manager only restores `for` state when manager is newly created which happens
|
||||
// during restarts. This flag provides an option to restore the `for` state when new rule groups are
|
||||
// added to an existing manager
|
||||
RestoreNewRuleGroups bool
|
||||
|
||||
Metrics *Metrics
|
||||
}
|
||||
|
@ -154,11 +159,12 @@ func NewManager(o *ManagerOptions) *Manager {
|
|||
}
|
||||
|
||||
m := &Manager{
|
||||
groups: map[string]*Group{},
|
||||
opts: o,
|
||||
block: make(chan struct{}),
|
||||
done: make(chan struct{}),
|
||||
logger: o.Logger,
|
||||
groups: map[string]*Group{},
|
||||
opts: o,
|
||||
block: make(chan struct{}),
|
||||
done: make(chan struct{}),
|
||||
logger: o.Logger,
|
||||
restoreNewRuleGroups: o.RestoreNewRuleGroups,
|
||||
}
|
||||
|
||||
return m
|
||||
|
@ -182,8 +188,14 @@ func (m *Manager) Stop() {
|
|||
|
||||
m.logger.Info("Stopping rule manager...")
|
||||
|
||||
// Stop all groups asynchronously, then wait for them to finish.
|
||||
// This is faster than stopping and waiting for each group in sequence.
|
||||
for _, eg := range m.groups {
|
||||
eg.stop()
|
||||
eg.stopAsync()
|
||||
}
|
||||
|
||||
for _, eg := range m.groups {
|
||||
eg.waitStopped()
|
||||
}
|
||||
|
||||
// Shut down the groups waiting multiple evaluation intervals to write
|
||||
|
@ -207,7 +219,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels
|
|||
default:
|
||||
}
|
||||
|
||||
groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, files...)
|
||||
groups, errs := m.LoadGroups(interval, externalLabels, externalURL, groupEvalIterationFunc, false, files...)
|
||||
|
||||
if errs != nil {
|
||||
for _, e := range errs {
|
||||
|
@ -276,7 +288,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels
|
|||
|
||||
// GroupLoader is responsible for loading rule groups from arbitrary sources and parsing them.
|
||||
type GroupLoader interface {
|
||||
Load(identifier string) (*rulefmt.RuleGroups, []error)
|
||||
Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error)
|
||||
Parse(query string) (parser.Expr, error)
|
||||
}
|
||||
|
||||
|
@ -284,22 +296,22 @@ type GroupLoader interface {
|
|||
// and parser.ParseExpr.
|
||||
type FileLoader struct{}
|
||||
|
||||
func (FileLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {
|
||||
return rulefmt.ParseFile(identifier)
|
||||
func (FileLoader) Load(identifier string, ignoreUnknownFields bool) (*rulefmt.RuleGroups, []error) {
|
||||
return rulefmt.ParseFile(identifier, ignoreUnknownFields)
|
||||
}
|
||||
|
||||
func (FileLoader) Parse(query string) (parser.Expr, error) { return parser.ParseExpr(query) }
|
||||
|
||||
// LoadGroups reads groups from a list of files.
|
||||
func (m *Manager) LoadGroups(
|
||||
interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, filenames ...string,
|
||||
interval time.Duration, externalLabels labels.Labels, externalURL string, groupEvalIterationFunc GroupEvalIterationFunc, ignoreUnknownFields bool, filenames ...string,
|
||||
) (map[string]*Group, []error) {
|
||||
groups := make(map[string]*Group)
|
||||
|
||||
shouldRestore := !m.restored
|
||||
shouldRestore := !m.restored || m.restoreNewRuleGroups
|
||||
|
||||
for _, fn := range filenames {
|
||||
rgs, errs := m.opts.GroupLoader.Load(fn)
|
||||
rgs, errs := m.opts.GroupLoader.Load(fn, ignoreUnknownFields)
|
||||
if errs != nil {
|
||||
return nil, errs
|
||||
}
|
||||
|
@ -329,7 +341,7 @@ func (m *Manager) LoadGroups(
|
|||
labels.FromMap(r.Annotations),
|
||||
externalLabels,
|
||||
externalURL,
|
||||
m.restored,
|
||||
!shouldRestore,
|
||||
m.logger.With("alert", r.Alert),
|
||||
))
|
||||
continue
|
||||
|
@ -444,8 +456,8 @@ func SendAlerts(s Sender, externalURL string) NotifyFunc {
|
|||
// RuleDependencyController controls whether a set of rules have dependencies between each other.
|
||||
type RuleDependencyController interface {
|
||||
// AnalyseRules analyses dependencies between the input rules. For each rule that it's guaranteed
|
||||
// not having any dependants and/or dependency, this function should call Rule.SetNoDependentRules(true)
|
||||
// and/or Rule.SetNoDependencyRules(true).
|
||||
// not having any dependants and/or dependency, this function should call Rule.SetDependentRules(...)
|
||||
// and/or Rule.SetDependencyRules(...).
|
||||
AnalyseRules(rules []Rule)
|
||||
}
|
||||
|
||||
|
@ -460,15 +472,22 @@ func (c ruleDependencyController) AnalyseRules(rules []Rule) {
|
|||
}
|
||||
|
||||
for _, r := range rules {
|
||||
r.SetNoDependentRules(depMap.dependents(r) == 0)
|
||||
r.SetNoDependencyRules(depMap.dependencies(r) == 0)
|
||||
r.SetDependentRules(depMap.dependents(r))
|
||||
r.SetDependencyRules(depMap.dependencies(r))
|
||||
}
|
||||
}
|
||||
|
||||
// ConcurrentRules represents a slice of indexes of rules that can be evaluated concurrently.
|
||||
type ConcurrentRules []int
|
||||
|
||||
// RuleConcurrencyController controls concurrency for rules that are safe to be evaluated concurrently.
|
||||
// Its purpose is to bound the amount of concurrency in rule evaluations to avoid overwhelming the Prometheus
|
||||
// server with additional query load. Concurrency is controlled globally, not on a per-group basis.
|
||||
type RuleConcurrencyController interface {
|
||||
// SplitGroupIntoBatches returns an ordered slice of of ConcurrentRules, which are batches of rules that can be evaluated concurrently.
|
||||
// The rules are represented by their index from the input rule group.
|
||||
SplitGroupIntoBatches(ctx context.Context, group *Group) []ConcurrentRules
|
||||
|
||||
// Allow determines if the given rule is allowed to be evaluated concurrently.
|
||||
// If Allow() returns true, then Done() must be called to release the acquired slot and corresponding cleanup is done.
|
||||
// It is important that both *Group and Rule are not retained and only be used for the duration of the call.
|
||||
|
@ -490,21 +509,51 @@ func newRuleConcurrencyController(maxConcurrency int64) RuleConcurrencyControlle
|
|||
}
|
||||
|
||||
func (c *concurrentRuleEvalController) Allow(_ context.Context, _ *Group, rule Rule) bool {
|
||||
// To allow a rule to be executed concurrently, we need 3 conditions:
|
||||
// 1. The rule must not have any rules that depend on it.
|
||||
// 2. The rule itself must not depend on any other rules.
|
||||
// 3. If 1 & 2 are true, then and only then we should try to acquire the concurrency slot.
|
||||
if rule.NoDependentRules() && rule.NoDependencyRules() {
|
||||
return c.sema.TryAcquire(1)
|
||||
return c.sema.TryAcquire(1)
|
||||
}
|
||||
|
||||
func (c *concurrentRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules {
|
||||
// Using the rule dependency controller information (rules being identified as having no dependencies or no dependants),
|
||||
// we can safely run the following concurrent groups:
|
||||
// 1. Concurrently, all rules that have no dependencies
|
||||
// 2. Sequentially, all rules that have both dependencies and dependants
|
||||
// 3. Concurrently, all rules that have no dependants
|
||||
|
||||
var noDependencies []int
|
||||
var dependenciesAndDependants []int
|
||||
var noDependants []int
|
||||
|
||||
for i, r := range g.rules {
|
||||
switch {
|
||||
case r.NoDependencyRules():
|
||||
noDependencies = append(noDependencies, i)
|
||||
case !r.NoDependentRules() && !r.NoDependencyRules():
|
||||
dependenciesAndDependants = append(dependenciesAndDependants, i)
|
||||
case r.NoDependentRules():
|
||||
noDependants = append(noDependants, i)
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
var order []ConcurrentRules
|
||||
if len(noDependencies) > 0 {
|
||||
order = append(order, noDependencies)
|
||||
}
|
||||
for _, r := range dependenciesAndDependants {
|
||||
order = append(order, []int{r})
|
||||
}
|
||||
if len(noDependants) > 0 {
|
||||
order = append(order, noDependants)
|
||||
}
|
||||
|
||||
return order
|
||||
}
|
||||
|
||||
func (c *concurrentRuleEvalController) Done(_ context.Context) {
|
||||
c.sema.Release(1)
|
||||
}
|
||||
|
||||
var _ RuleConcurrencyController = &sequentialRuleEvalController{}
|
||||
|
||||
// sequentialRuleEvalController is a RuleConcurrencyController that runs every rule sequentially.
|
||||
type sequentialRuleEvalController struct{}
|
||||
|
||||
|
@ -512,6 +561,10 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule)
|
|||
return false
|
||||
}
|
||||
|
||||
func (c sequentialRuleEvalController) SplitGroupIntoBatches(_ context.Context, g *Group) []ConcurrentRules {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c sequentialRuleEvalController) Done(_ context.Context) {}
|
||||
|
||||
// FromMaps returns new sorted Labels from the given maps, overriding each other in order.
|
||||
|
|
|
@ -808,7 +808,7 @@ func TestUpdate(t *testing.T) {
|
|||
}
|
||||
|
||||
// Groups will be recreated if updated.
|
||||
rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
|
||||
rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml", false)
|
||||
require.Empty(t, errs, "file parsing failures")
|
||||
|
||||
tmpFile, err := os.CreateTemp("", "rules.test.*.yaml")
|
||||
|
@ -1423,8 +1423,6 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
|
|||
evaluationTimestamp: atomic.NewTime(time.Time{}),
|
||||
evaluationDuration: atomic.NewDuration(0),
|
||||
lastError: atomic.NewError(nil),
|
||||
noDependentRules: atomic.NewBool(false),
|
||||
noDependencyRules: atomic.NewBool(false),
|
||||
}
|
||||
|
||||
group := NewGroup(GroupOptions{
|
||||
|
@ -1534,7 +1532,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci
|
|||
})
|
||||
|
||||
t.Run("load a mix of dependent and independent rules", func(t *testing.T) {
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -1569,7 +1567,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci
|
|||
})
|
||||
|
||||
t.Run("load only independent rules", func(t *testing.T) {
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_independent.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -1613,11 +1611,12 @@ func TestDependencyMap(t *testing.T) {
|
|||
depMap := buildDependencyMap(group.rules)
|
||||
|
||||
require.Zero(t, depMap.dependencies(rule))
|
||||
require.Equal(t, 2, depMap.dependents(rule))
|
||||
require.Equal(t, []Rule{rule2, rule4}, depMap.dependents(rule))
|
||||
require.Len(t, depMap.dependents(rule), 2)
|
||||
require.False(t, depMap.isIndependent(rule))
|
||||
|
||||
require.Zero(t, depMap.dependents(rule2))
|
||||
require.Equal(t, 1, depMap.dependencies(rule2))
|
||||
require.Equal(t, []Rule{rule}, depMap.dependencies(rule2))
|
||||
require.False(t, depMap.isIndependent(rule2))
|
||||
|
||||
require.Zero(t, depMap.dependents(rule3))
|
||||
|
@ -1625,7 +1624,7 @@ func TestDependencyMap(t *testing.T) {
|
|||
require.True(t, depMap.isIndependent(rule3))
|
||||
|
||||
require.Zero(t, depMap.dependents(rule4))
|
||||
require.Equal(t, 1, depMap.dependencies(rule4))
|
||||
require.Equal(t, []Rule{rule}, depMap.dependencies(rule4))
|
||||
require.False(t, depMap.isIndependent(rule4))
|
||||
}
|
||||
|
||||
|
@ -1958,7 +1957,8 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
|
|||
require.NotEqual(t, orig[h], depMap)
|
||||
// We expect there to be some dependencies since the new rule group contains a dependency.
|
||||
require.NotEmpty(t, depMap)
|
||||
require.Equal(t, 1, depMap.dependents(rr))
|
||||
require.Len(t, depMap.dependents(rr), 1)
|
||||
require.Equal(t, "HighRequestRate", depMap.dependents(rr)[0].Name())
|
||||
require.Zero(t, depMap.dependencies(rr))
|
||||
}
|
||||
}
|
||||
|
@ -1975,7 +1975,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
t.Cleanup(cancel)
|
||||
|
||||
ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, 0))
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -1987,6 +1987,10 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
start := time.Now()
|
||||
DefaultEvalIterationFunc(ctx, group, start)
|
||||
|
||||
// Expected evaluation order
|
||||
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
|
||||
require.Nil(t, order)
|
||||
|
||||
// Never expect more than 1 inflight query at a time.
|
||||
require.EqualValues(t, 1, maxInflight.Load())
|
||||
// Each rule should take at least 1 second to execute sequentially.
|
||||
|
@ -2017,7 +2021,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
opts.RuleConcurrencyController = nil
|
||||
ruleManager := NewManager(opts)
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -2055,7 +2059,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
opts.RuleConcurrencyController = nil
|
||||
ruleManager := NewManager(opts)
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_independent.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -2065,6 +2069,12 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
start := time.Now()
|
||||
DefaultEvalIterationFunc(ctx, group, start)
|
||||
|
||||
// Expected evaluation order (isn't affected by concurrency settings)
|
||||
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
|
||||
require.Equal(t, []ConcurrentRules{
|
||||
{0, 1, 2, 3, 4, 5},
|
||||
}, order)
|
||||
|
||||
// Max inflight can be 1 synchronous eval and up to MaxConcurrentEvals concurrent evals.
|
||||
require.EqualValues(t, opts.MaxConcurrentEvals+1, maxInflight.Load())
|
||||
// Some rules should execute concurrently so should complete quicker.
|
||||
|
@ -2093,7 +2103,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
opts.RuleConcurrencyController = nil
|
||||
ruleManager := NewManager(opts)
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_multiple_independent.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_independent.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -2104,6 +2114,12 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
|
||||
DefaultEvalIterationFunc(ctx, group, start)
|
||||
|
||||
// Expected evaluation order
|
||||
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
|
||||
require.Equal(t, []ConcurrentRules{
|
||||
{0, 1, 2, 3, 4, 5},
|
||||
}, order)
|
||||
|
||||
// Max inflight can be up to MaxConcurrentEvals concurrent evals, since there is sufficient concurrency to run all rules at once.
|
||||
require.LessOrEqual(t, int64(maxInflight.Load()), opts.MaxConcurrentEvals)
|
||||
// Some rules should execute concurrently so should complete quicker.
|
||||
|
@ -2134,7 +2150,7 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
opts.RuleConcurrencyController = nil
|
||||
ruleManager := NewManager(opts)
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, []string{"fixtures/rules_indeterminates.yaml"}...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_indeterminates.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -2153,6 +2169,232 @@ func TestAsyncRuleEvaluation(t *testing.T) {
|
|||
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("asynchronous evaluation of rules that benefit from reordering", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
storage := teststorage.New(t)
|
||||
t.Cleanup(func() { storage.Close() })
|
||||
inflightQueries := atomic.Int32{}
|
||||
maxInflight := atomic.Int32{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
ruleCount := 8
|
||||
opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
|
||||
|
||||
// Configure concurrency settings.
|
||||
opts.ConcurrentEvalsEnabled = true
|
||||
opts.MaxConcurrentEvals = int64(ruleCount) * 2
|
||||
opts.RuleConcurrencyController = nil
|
||||
ruleManager := NewManager(opts)
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_multiple_dependents_on_base.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
var group *Group
|
||||
for _, g := range groups {
|
||||
group = g
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Expected evaluation order
|
||||
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
|
||||
require.Equal(t, []ConcurrentRules{
|
||||
{0, 4},
|
||||
{1, 2, 3, 5, 6, 7},
|
||||
}, order)
|
||||
|
||||
group.Eval(ctx, start)
|
||||
|
||||
// Inflight queries should be equal to 6. This is the size of the second batch of rules that can be executed concurrently.
|
||||
require.EqualValues(t, 6, maxInflight.Load())
|
||||
// Some rules should execute concurrently so should complete quicker.
|
||||
require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
|
||||
// Each rule produces one vector.
|
||||
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
|
||||
})
|
||||
|
||||
t.Run("attempted asynchronous evaluation of chained rules", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
storage := teststorage.New(t)
|
||||
t.Cleanup(func() { storage.Close() })
|
||||
inflightQueries := atomic.Int32{}
|
||||
maxInflight := atomic.Int32{}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
ruleCount := 7
|
||||
opts := optsFactory(storage, &maxInflight, &inflightQueries, 0)
|
||||
|
||||
// Configure concurrency settings.
|
||||
opts.ConcurrentEvalsEnabled = true
|
||||
opts.MaxConcurrentEvals = int64(ruleCount) * 2
|
||||
opts.RuleConcurrencyController = nil
|
||||
ruleManager := NewManager(opts)
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, []string{"fixtures/rules_chain.yaml"}...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
var group *Group
|
||||
for _, g := range groups {
|
||||
group = g
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Expected evaluation order
|
||||
order := group.opts.RuleConcurrencyController.SplitGroupIntoBatches(ctx, group)
|
||||
require.Equal(t, []ConcurrentRules{
|
||||
{0, 1},
|
||||
{2},
|
||||
{3},
|
||||
{4, 5, 6},
|
||||
}, order)
|
||||
|
||||
group.Eval(ctx, start)
|
||||
|
||||
require.EqualValues(t, 3, maxInflight.Load())
|
||||
// Some rules should execute concurrently so should complete quicker.
|
||||
require.Less(t, time.Since(start).Seconds(), (time.Duration(ruleCount) * artificialDelay).Seconds())
|
||||
// Each rule produces one vector.
|
||||
require.EqualValues(t, ruleCount, testutil.ToFloat64(group.metrics.GroupSamples))
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewRuleGroupRestoration(t *testing.T) {
|
||||
store := teststorage.New(t)
|
||||
t.Cleanup(func() { store.Close() })
|
||||
var (
|
||||
inflightQueries atomic.Int32
|
||||
maxInflight atomic.Int32
|
||||
maxConcurrency int64
|
||||
interval = 60 * time.Second
|
||||
)
|
||||
|
||||
waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) {
|
||||
for {
|
||||
select {
|
||||
case cnt := <-ch:
|
||||
if cnt == targetCount {
|
||||
return
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
files := []string{"fixtures/alert_rule.yaml"}
|
||||
|
||||
option := optsFactory(store, &maxInflight, &inflightQueries, maxConcurrency)
|
||||
option.Queryable = store
|
||||
option.Appendable = store
|
||||
option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {}
|
||||
|
||||
var evalCount atomic.Int32
|
||||
ch := make(chan int32)
|
||||
noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) {
|
||||
evalCount.Inc()
|
||||
ch <- evalCount.Load()
|
||||
}
|
||||
|
||||
ruleManager := NewManager(option)
|
||||
go ruleManager.Run()
|
||||
err := ruleManager.Update(interval, files, labels.EmptyLabels(), "", noopEvalIterFunc)
|
||||
require.NoError(t, err)
|
||||
|
||||
waitForEvaluations(t, ch, 3)
|
||||
require.Equal(t, int32(3), evalCount.Load())
|
||||
ruleGroups := make(map[string]struct{})
|
||||
for _, group := range ruleManager.groups {
|
||||
ruleGroups[group.Name()] = struct{}{}
|
||||
require.False(t, group.shouldRestore)
|
||||
for _, rule := range group.rules {
|
||||
require.True(t, rule.(*AlertingRule).restored.Load())
|
||||
}
|
||||
}
|
||||
|
||||
files = append(files, "fixtures/alert_rule1.yaml")
|
||||
err = ruleManager.Update(interval, files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
ruleManager.Stop()
|
||||
for _, group := range ruleManager.groups {
|
||||
// new rule groups added to existing manager will not be restored
|
||||
require.False(t, group.shouldRestore)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRuleGroupRestorationWithRestoreNewGroupOption(t *testing.T) {
|
||||
store := teststorage.New(t)
|
||||
t.Cleanup(func() { store.Close() })
|
||||
var (
|
||||
inflightQueries atomic.Int32
|
||||
maxInflight atomic.Int32
|
||||
maxConcurrency int64
|
||||
interval = 60 * time.Second
|
||||
)
|
||||
|
||||
waitForEvaluations := func(t *testing.T, ch <-chan int32, targetCount int32) {
|
||||
for {
|
||||
select {
|
||||
case cnt := <-ch:
|
||||
if cnt == targetCount {
|
||||
return
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
files := []string{"fixtures/alert_rule.yaml"}
|
||||
|
||||
option := optsFactory(store, &maxInflight, &inflightQueries, maxConcurrency)
|
||||
option.Queryable = store
|
||||
option.Appendable = store
|
||||
option.RestoreNewRuleGroups = true
|
||||
option.NotifyFunc = func(ctx context.Context, expr string, alerts ...*Alert) {}
|
||||
|
||||
var evalCount atomic.Int32
|
||||
ch := make(chan int32)
|
||||
noopEvalIterFunc := func(ctx context.Context, g *Group, evalTimestamp time.Time) {
|
||||
evalCount.Inc()
|
||||
ch <- evalCount.Load()
|
||||
}
|
||||
|
||||
ruleManager := NewManager(option)
|
||||
go ruleManager.Run()
|
||||
err := ruleManager.Update(interval, files, labels.EmptyLabels(), "", noopEvalIterFunc)
|
||||
require.NoError(t, err)
|
||||
|
||||
waitForEvaluations(t, ch, 3)
|
||||
require.Equal(t, int32(3), evalCount.Load())
|
||||
ruleGroups := make(map[string]struct{})
|
||||
for _, group := range ruleManager.groups {
|
||||
ruleGroups[group.Name()] = struct{}{}
|
||||
require.False(t, group.shouldRestore)
|
||||
for _, rule := range group.rules {
|
||||
require.True(t, rule.(*AlertingRule).restored.Load())
|
||||
}
|
||||
}
|
||||
|
||||
files = append(files, "fixtures/alert_rule1.yaml")
|
||||
err = ruleManager.Update(interval, files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
// stop eval
|
||||
ruleManager.Stop()
|
||||
for _, group := range ruleManager.groups {
|
||||
if _, OK := ruleGroups[group.Name()]; OK {
|
||||
// already restored
|
||||
require.False(t, group.shouldRestore)
|
||||
continue
|
||||
}
|
||||
// new rule groups added to existing manager will be restored
|
||||
require.True(t, group.shouldRestore)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBoundedRuleEvalConcurrency(t *testing.T) {
|
||||
|
@ -2170,7 +2412,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
|
|||
|
||||
ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, maxConcurrency))
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, files...)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, files...)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, groupCount)
|
||||
|
||||
|
@ -2212,6 +2454,41 @@ func TestUpdateWhenStopped(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGroup_Eval_RaceConditionOnStoppingGroupEvaluationWhileRulesAreEvaluatedConcurrently(t *testing.T) {
|
||||
storage := teststorage.New(t)
|
||||
t.Cleanup(func() { storage.Close() })
|
||||
|
||||
var (
|
||||
inflightQueries atomic.Int32
|
||||
maxInflight atomic.Int32
|
||||
maxConcurrency int64 = 10
|
||||
)
|
||||
|
||||
files := []string{"fixtures/rules_multiple_groups.yaml"}
|
||||
files2 := []string{"fixtures/rules.yaml"}
|
||||
|
||||
ruleManager := NewManager(optsFactory(storage, &maxInflight, &inflightQueries, maxConcurrency))
|
||||
go func() {
|
||||
ruleManager.Run()
|
||||
}()
|
||||
<-ruleManager.block
|
||||
|
||||
// Update the group a decent number of times to simulate start and stopping in the middle of an evaluation.
|
||||
for i := 0; i < 10; i++ {
|
||||
err := ruleManager.Update(time.Second, files, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait half of the query execution duration and then change the rule groups loaded by the manager
|
||||
// so that the previous rule group will be interrupted while the query is executing.
|
||||
time.Sleep(artificialDelay / 2)
|
||||
|
||||
err = ruleManager.Update(time.Second, files2, labels.EmptyLabels(), "", nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
ruleManager.Stop()
|
||||
}
|
||||
|
||||
const artificialDelay = 250 * time.Millisecond
|
||||
|
||||
func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.Int32, maxConcurrent int64) *ManagerOptions {
|
||||
|
@ -2377,7 +2654,7 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) {
|
|||
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
|
||||
})
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, tc.ruleFile)
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, tc.ruleFile)
|
||||
require.Empty(t, errs)
|
||||
require.Len(t, groups, 1)
|
||||
|
||||
|
@ -2394,3 +2671,26 @@ func TestRuleDependencyController_AnalyseRules(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) {
|
||||
storage := teststorage.New(b)
|
||||
b.Cleanup(func() { storage.Close() })
|
||||
|
||||
ruleManager := NewManager(&ManagerOptions{
|
||||
Context: context.Background(),
|
||||
Logger: promslog.NewNopLogger(),
|
||||
Appendable: storage,
|
||||
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
|
||||
})
|
||||
|
||||
groups, errs := ruleManager.LoadGroups(time.Second, labels.EmptyLabels(), "", nil, false, "fixtures/rules_multiple.yaml")
|
||||
require.Empty(b, errs)
|
||||
require.Len(b, groups, 1)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for _, g := range groups {
|
||||
ruleManager.opts.RuleDependencyController.AnalyseRules(g.rules)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,10 +45,12 @@ func (u unknownRule) SetEvaluationDuration(time.Duration) {}
|
|||
func (u unknownRule) GetEvaluationDuration() time.Duration { return 0 }
|
||||
func (u unknownRule) SetEvaluationTimestamp(time.Time) {}
|
||||
func (u unknownRule) GetEvaluationTimestamp() time.Time { return time.Time{} }
|
||||
func (u unknownRule) SetNoDependentRules(bool) {}
|
||||
func (u unknownRule) SetDependentRules([]Rule) {}
|
||||
func (u unknownRule) NoDependentRules() bool { return false }
|
||||
func (u unknownRule) SetNoDependencyRules(bool) {}
|
||||
func (u unknownRule) DependentRules() []Rule { return nil }
|
||||
func (u unknownRule) SetDependencyRules([]Rule) {}
|
||||
func (u unknownRule) NoDependencyRules() bool { return false }
|
||||
func (u unknownRule) DependencyRules() []Rule { return nil }
|
||||
|
||||
func TestNewRuleDetailPanics(t *testing.T) {
|
||||
require.PanicsWithValue(t, `unknown rule type "rules.unknownRule"`, func() {
|
||||
|
@ -76,12 +78,12 @@ func TestNewRuleDetail(t *testing.T) {
|
|||
require.False(t, detail.NoDependentRules)
|
||||
require.False(t, detail.NoDependencyRules)
|
||||
|
||||
rule.SetNoDependentRules(true)
|
||||
rule.SetDependentRules([]Rule{})
|
||||
detail = NewRuleDetail(rule)
|
||||
require.True(t, detail.NoDependentRules)
|
||||
require.False(t, detail.NoDependencyRules)
|
||||
|
||||
rule.SetNoDependencyRules(true)
|
||||
rule.SetDependencyRules([]Rule{})
|
||||
detail = NewRuleDetail(rule)
|
||||
require.True(t, detail.NoDependentRules)
|
||||
require.True(t, detail.NoDependencyRules)
|
||||
|
@ -104,12 +106,12 @@ func TestNewRuleDetail(t *testing.T) {
|
|||
require.False(t, detail.NoDependentRules)
|
||||
require.False(t, detail.NoDependencyRules)
|
||||
|
||||
rule.SetNoDependentRules(true)
|
||||
rule.SetDependentRules([]Rule{})
|
||||
detail = NewRuleDetail(rule)
|
||||
require.True(t, detail.NoDependentRules)
|
||||
require.False(t, detail.NoDependencyRules)
|
||||
|
||||
rule.SetNoDependencyRules(true)
|
||||
rule.SetDependencyRules([]Rule{})
|
||||
detail = NewRuleDetail(rule)
|
||||
require.True(t, detail.NoDependentRules)
|
||||
require.True(t, detail.NoDependencyRules)
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/atomic"
|
||||
|
@ -43,8 +44,9 @@ type RecordingRule struct {
|
|||
// Duration of how long it took to evaluate the recording rule.
|
||||
evaluationDuration *atomic.Duration
|
||||
|
||||
noDependentRules *atomic.Bool
|
||||
noDependencyRules *atomic.Bool
|
||||
dependenciesMutex sync.RWMutex
|
||||
dependentRules []Rule
|
||||
dependencyRules []Rule
|
||||
}
|
||||
|
||||
// NewRecordingRule returns a new recording rule.
|
||||
|
@ -57,8 +59,6 @@ func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *Reco
|
|||
evaluationTimestamp: atomic.NewTime(time.Time{}),
|
||||
evaluationDuration: atomic.NewDuration(0),
|
||||
lastError: atomic.NewError(nil),
|
||||
noDependentRules: atomic.NewBool(false),
|
||||
noDependencyRules: atomic.NewBool(false),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,18 +172,52 @@ func (rule *RecordingRule) GetEvaluationTimestamp() time.Time {
|
|||
return rule.evaluationTimestamp.Load()
|
||||
}
|
||||
|
||||
func (rule *RecordingRule) SetNoDependentRules(noDependentRules bool) {
|
||||
rule.noDependentRules.Store(noDependentRules)
|
||||
func (rule *RecordingRule) SetDependentRules(dependents []Rule) {
|
||||
rule.dependenciesMutex.Lock()
|
||||
defer rule.dependenciesMutex.Unlock()
|
||||
|
||||
rule.dependentRules = make([]Rule, len(dependents))
|
||||
copy(rule.dependentRules, dependents)
|
||||
}
|
||||
|
||||
func (rule *RecordingRule) NoDependentRules() bool {
|
||||
return rule.noDependentRules.Load()
|
||||
rule.dependenciesMutex.RLock()
|
||||
defer rule.dependenciesMutex.RUnlock()
|
||||
|
||||
if rule.dependentRules == nil {
|
||||
return false // We don't know if there are dependent rules.
|
||||
}
|
||||
|
||||
return len(rule.dependentRules) == 0
|
||||
}
|
||||
|
||||
func (rule *RecordingRule) SetNoDependencyRules(noDependencyRules bool) {
|
||||
rule.noDependencyRules.Store(noDependencyRules)
|
||||
func (rule *RecordingRule) DependentRules() []Rule {
|
||||
rule.dependenciesMutex.RLock()
|
||||
defer rule.dependenciesMutex.RUnlock()
|
||||
return rule.dependentRules
|
||||
}
|
||||
|
||||
func (rule *RecordingRule) SetDependencyRules(dependencies []Rule) {
|
||||
rule.dependenciesMutex.Lock()
|
||||
defer rule.dependenciesMutex.Unlock()
|
||||
|
||||
rule.dependencyRules = make([]Rule, len(dependencies))
|
||||
copy(rule.dependencyRules, dependencies)
|
||||
}
|
||||
|
||||
func (rule *RecordingRule) NoDependencyRules() bool {
|
||||
return rule.noDependencyRules.Load()
|
||||
rule.dependenciesMutex.RLock()
|
||||
defer rule.dependenciesMutex.RUnlock()
|
||||
|
||||
if rule.dependencyRules == nil {
|
||||
return false // We don't know if there are dependency rules.
|
||||
}
|
||||
|
||||
return len(rule.dependencyRules) == 0
|
||||
}
|
||||
|
||||
func (rule *RecordingRule) DependencyRules() []Rule {
|
||||
rule.dependenciesMutex.RLock()
|
||||
defer rule.dependenciesMutex.RUnlock()
|
||||
return rule.dependencyRules
|
||||
}
|
||||
|
|
|
@ -255,24 +255,32 @@ func TestRecordingEvalWithOrigin(t *testing.T) {
|
|||
require.Equal(t, detail, NewRuleDetail(rule))
|
||||
}
|
||||
|
||||
func TestRecordingRule_SetNoDependentRules(t *testing.T) {
|
||||
func TestRecordingRule_SetDependentRules(t *testing.T) {
|
||||
dependentRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
|
||||
|
||||
rule := NewRecordingRule("1", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels())
|
||||
require.False(t, rule.NoDependentRules())
|
||||
|
||||
rule.SetNoDependentRules(false)
|
||||
rule.SetDependentRules([]Rule{dependentRule})
|
||||
require.False(t, rule.NoDependentRules())
|
||||
require.Equal(t, []Rule{dependentRule}, rule.DependentRules())
|
||||
|
||||
rule.SetNoDependentRules(true)
|
||||
rule.SetDependentRules([]Rule{})
|
||||
require.True(t, rule.NoDependentRules())
|
||||
require.Empty(t, rule.DependentRules())
|
||||
}
|
||||
|
||||
func TestRecordingRule_SetNoDependencyRules(t *testing.T) {
|
||||
func TestRecordingRule_SetDependencyRules(t *testing.T) {
|
||||
dependencyRule := NewRecordingRule("test1", nil, labels.EmptyLabels())
|
||||
|
||||
rule := NewRecordingRule("1", &parser.NumberLiteral{Val: 1}, labels.EmptyLabels())
|
||||
require.False(t, rule.NoDependencyRules())
|
||||
|
||||
rule.SetNoDependencyRules(false)
|
||||
rule.SetDependencyRules([]Rule{dependencyRule})
|
||||
require.False(t, rule.NoDependencyRules())
|
||||
require.Equal(t, []Rule{dependencyRule}, rule.DependencyRules())
|
||||
|
||||
rule.SetNoDependencyRules(true)
|
||||
rule.SetDependencyRules([]Rule{})
|
||||
require.True(t, rule.NoDependencyRules())
|
||||
require.Empty(t, rule.DependencyRules())
|
||||
}
|
||||
|
|
|
@ -62,19 +62,25 @@ type Rule interface {
|
|||
// NOTE: Used dynamically by rules.html template.
|
||||
GetEvaluationTimestamp() time.Time
|
||||
|
||||
// SetNoDependentRules sets whether there's no other rule in the rule group that depends on this rule.
|
||||
SetNoDependentRules(bool)
|
||||
// SetDependentRules sets rules which depend on the output of this rule.
|
||||
SetDependentRules(rules []Rule)
|
||||
|
||||
// NoDependentRules returns true if it's guaranteed that in the rule group there's no other rule
|
||||
// which depends on this one. In case this function returns false there's no such guarantee, which
|
||||
// means there may or may not be other rules depending on this one.
|
||||
NoDependentRules() bool
|
||||
|
||||
// SetNoDependencyRules sets whether this rule doesn't depend on the output of any rule in the rule group.
|
||||
SetNoDependencyRules(bool)
|
||||
// DependentRules returns the rules which depend on the output of this rule.
|
||||
DependentRules() []Rule
|
||||
|
||||
// SetDependencyRules sets rules on which this rule depends.
|
||||
SetDependencyRules(rules []Rule)
|
||||
|
||||
// NoDependencyRules returns true if it's guaranteed that this rule doesn't depend on the output of
|
||||
// any other rule in the group. In case this function returns false there's no such guarantee, which
|
||||
// means the rule may or may not depend on other rules.
|
||||
NoDependencyRules() bool
|
||||
|
||||
// DependencyRules returns the rules on which this rule depends.
|
||||
DependencyRules() []Rule
|
||||
}
|
||||
|
|
|
@ -46,15 +46,15 @@ type nopAppender struct{}
|
|||
func (a nopAppender) SetOptions(opts *storage.AppendOptions) {}
|
||||
|
||||
func (a nopAppender) Append(storage.SeriesRef, labels.Labels, int64, float64) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) AppendExemplar(storage.SeriesRef, labels.Labels, exemplar.Exemplar) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
return 2, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
return 3, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
|
||||
|
@ -62,11 +62,11 @@ func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels
|
|||
}
|
||||
|
||||
func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
return 4, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) AppendCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64) (storage.SeriesRef, error) {
|
||||
return 0, nil
|
||||
return 5, nil
|
||||
}
|
||||
|
||||
func (a nopAppender) Commit() error { return nil }
|
||||
|
@ -90,6 +90,27 @@ type histogramSample struct {
|
|||
fh *histogram.FloatHistogram
|
||||
}
|
||||
|
||||
type metadataEntry struct {
|
||||
m metadata.Metadata
|
||||
metric labels.Labels
|
||||
}
|
||||
|
||||
func metadataEntryEqual(a, b metadataEntry) bool {
|
||||
if !labels.Equal(a.metric, b.metric) {
|
||||
return false
|
||||
}
|
||||
if a.m.Type != b.m.Type {
|
||||
return false
|
||||
}
|
||||
if a.m.Unit != b.m.Unit {
|
||||
return false
|
||||
}
|
||||
if a.m.Help != b.m.Help {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type collectResultAppendable struct {
|
||||
*collectResultAppender
|
||||
}
|
||||
|
@ -112,8 +133,8 @@ type collectResultAppender struct {
|
|||
rolledbackHistograms []histogramSample
|
||||
resultExemplars []exemplar.Exemplar
|
||||
pendingExemplars []exemplar.Exemplar
|
||||
resultMetadata []metadata.Metadata
|
||||
pendingMetadata []metadata.Metadata
|
||||
resultMetadata []metadataEntry
|
||||
pendingMetadata []metadataEntry
|
||||
}
|
||||
|
||||
func (a *collectResultAppender) SetOptions(opts *storage.AppendOptions) {}
|
||||
|
@ -173,7 +194,7 @@ func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRe
|
|||
func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
|
||||
a.mtx.Lock()
|
||||
defer a.mtx.Unlock()
|
||||
a.pendingMetadata = append(a.pendingMetadata, m)
|
||||
a.pendingMetadata = append(a.pendingMetadata, metadataEntry{metric: l, m: m})
|
||||
if ref == 0 {
|
||||
ref = storage.SeriesRef(rand.Uint64())
|
||||
}
|
||||
|
|
338
scrape/scrape.go
338
scrape/scrape.go
|
@ -29,6 +29,7 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/klauspost/compress/gzip"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
|
@ -931,6 +932,7 @@ type scrapeLoop struct {
|
|||
// scrapeCache tracks mappings of exposed metric strings to label sets and
|
||||
// storage references. Additionally, it tracks staleness of series between
|
||||
// scrapes.
|
||||
// Cache is meant to be used per a single target.
|
||||
type scrapeCache struct {
|
||||
iter uint64 // Current scrape iteration.
|
||||
|
||||
|
@ -951,8 +953,10 @@ type scrapeCache struct {
|
|||
seriesCur map[uint64]labels.Labels
|
||||
seriesPrev map[uint64]labels.Labels
|
||||
|
||||
metaMtx sync.Mutex
|
||||
metadata map[string]*metaEntry
|
||||
// TODO(bwplotka): Consider moving Metadata API to use WAL instead of scrape loop to
|
||||
// avoid locking (using metadata API can block scraping).
|
||||
metaMtx sync.Mutex // Mutex is needed due to api touching it when metadata is queried.
|
||||
metadata map[string]*metaEntry // metadata by metric family name.
|
||||
|
||||
metrics *scrapeMetrics
|
||||
}
|
||||
|
@ -1078,73 +1082,79 @@ func (c *scrapeCache) forEachStale(f func(labels.Labels) bool) {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *scrapeCache) setType(metric []byte, t model.MetricType) {
|
||||
c.metaMtx.Lock()
|
||||
func yoloString(b []byte) string {
|
||||
return unsafe.String(unsafe.SliceData(b), len(b))
|
||||
}
|
||||
|
||||
e, ok := c.metadata[string(metric)]
|
||||
func (c *scrapeCache) setType(mfName []byte, t model.MetricType) ([]byte, *metaEntry) {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
||||
e, ok := c.metadata[yoloString(mfName)]
|
||||
if !ok {
|
||||
e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
|
||||
c.metadata[string(metric)] = e
|
||||
c.metadata[string(mfName)] = e
|
||||
}
|
||||
if e.Type != t {
|
||||
e.Type = t
|
||||
e.lastIterChange = c.iter
|
||||
}
|
||||
e.lastIter = c.iter
|
||||
|
||||
c.metaMtx.Unlock()
|
||||
return mfName, e
|
||||
}
|
||||
|
||||
func (c *scrapeCache) setHelp(metric, help []byte) {
|
||||
func (c *scrapeCache) setHelp(mfName, help []byte) ([]byte, *metaEntry) {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
||||
e, ok := c.metadata[string(metric)]
|
||||
e, ok := c.metadata[yoloString(mfName)]
|
||||
if !ok {
|
||||
e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
|
||||
c.metadata[string(metric)] = e
|
||||
c.metadata[string(mfName)] = e
|
||||
}
|
||||
if e.Help != string(help) {
|
||||
e.Help = string(help)
|
||||
e.lastIterChange = c.iter
|
||||
}
|
||||
e.lastIter = c.iter
|
||||
|
||||
c.metaMtx.Unlock()
|
||||
return mfName, e
|
||||
}
|
||||
|
||||
func (c *scrapeCache) setUnit(metric, unit []byte) {
|
||||
func (c *scrapeCache) setUnit(mfName, unit []byte) ([]byte, *metaEntry) {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
||||
e, ok := c.metadata[string(metric)]
|
||||
e, ok := c.metadata[yoloString(mfName)]
|
||||
if !ok {
|
||||
e = &metaEntry{Metadata: metadata.Metadata{Type: model.MetricTypeUnknown}}
|
||||
c.metadata[string(metric)] = e
|
||||
c.metadata[string(mfName)] = e
|
||||
}
|
||||
if e.Unit != string(unit) {
|
||||
e.Unit = string(unit)
|
||||
e.lastIterChange = c.iter
|
||||
}
|
||||
e.lastIter = c.iter
|
||||
|
||||
c.metaMtx.Unlock()
|
||||
return mfName, e
|
||||
}
|
||||
|
||||
func (c *scrapeCache) GetMetadata(metric string) (MetricMetadata, bool) {
|
||||
// GetMetadata returns metadata given the metric family name.
|
||||
func (c *scrapeCache) GetMetadata(mfName string) (MetricMetadata, bool) {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
||||
m, ok := c.metadata[metric]
|
||||
m, ok := c.metadata[mfName]
|
||||
if !ok {
|
||||
return MetricMetadata{}, false
|
||||
}
|
||||
return MetricMetadata{
|
||||
Metric: metric,
|
||||
Type: m.Type,
|
||||
Help: m.Help,
|
||||
Unit: m.Unit,
|
||||
MetricFamily: mfName,
|
||||
Type: m.Type,
|
||||
Help: m.Help,
|
||||
Unit: m.Unit,
|
||||
}, true
|
||||
}
|
||||
|
||||
// ListMetadata lists metadata.
|
||||
func (c *scrapeCache) ListMetadata() []MetricMetadata {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
@ -1153,16 +1163,16 @@ func (c *scrapeCache) ListMetadata() []MetricMetadata {
|
|||
|
||||
for m, e := range c.metadata {
|
||||
res = append(res, MetricMetadata{
|
||||
Metric: m,
|
||||
Type: e.Type,
|
||||
Help: e.Help,
|
||||
Unit: e.Unit,
|
||||
MetricFamily: m,
|
||||
Type: e.Type,
|
||||
Help: e.Help,
|
||||
Unit: e.Unit,
|
||||
})
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// MetadataSize returns the size of the metadata cache.
|
||||
// SizeMetadata returns the size of the metadata cache.
|
||||
func (c *scrapeCache) SizeMetadata() (s int) {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
@ -1173,7 +1183,7 @@ func (c *scrapeCache) SizeMetadata() (s int) {
|
|||
return s
|
||||
}
|
||||
|
||||
// MetadataLen returns the number of metadata entries in the cache.
|
||||
// LengthMetadata returns the number of metadata entries in the cache.
|
||||
func (c *scrapeCache) LengthMetadata() int {
|
||||
c.metaMtx.Lock()
|
||||
defer c.metaMtx.Unlock()
|
||||
|
@ -1607,39 +1617,17 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string,
|
|||
)
|
||||
}
|
||||
var (
|
||||
appErrs = appendErrors{}
|
||||
sampleLimitErr error
|
||||
bucketLimitErr error
|
||||
lset labels.Labels // escapes to heap so hoisted out of loop
|
||||
e exemplar.Exemplar // escapes to heap so hoisted out of loop
|
||||
meta metadata.Metadata
|
||||
metadataChanged bool
|
||||
appErrs = appendErrors{}
|
||||
sampleLimitErr error
|
||||
bucketLimitErr error
|
||||
lset labels.Labels // escapes to heap so hoisted out of loop
|
||||
e exemplar.Exemplar // escapes to heap so hoisted out of loop
|
||||
lastMeta *metaEntry
|
||||
lastMFName []byte
|
||||
)
|
||||
|
||||
exemplars := make([]exemplar.Exemplar, 0, 1)
|
||||
|
||||
// updateMetadata updates the current iteration's metadata object and the
|
||||
// metadataChanged value if we have metadata in the scrape cache AND the
|
||||
// labelset is for a new series or the metadata for this series has just
|
||||
// changed. It returns a boolean based on whether the metadata was updated.
|
||||
updateMetadata := func(lset labels.Labels, isNewSeries bool) bool {
|
||||
if !sl.appendMetadataToWAL {
|
||||
return false
|
||||
}
|
||||
|
||||
sl.cache.metaMtx.Lock()
|
||||
defer sl.cache.metaMtx.Unlock()
|
||||
metaEntry, metaOk := sl.cache.metadata[lset.Get(labels.MetricName)]
|
||||
if metaOk && (isNewSeries || metaEntry.lastIterChange == sl.cache.iter) {
|
||||
metadataChanged = true
|
||||
meta.Type = metaEntry.Type
|
||||
meta.Unit = metaEntry.Unit
|
||||
meta.Help = metaEntry.Help
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Take an appender with limits.
|
||||
app = appender(app, sl.sampleLimit, sl.bucketLimit, sl.maxSchema)
|
||||
|
||||
|
@ -1669,14 +1657,18 @@ loop:
|
|||
break
|
||||
}
|
||||
switch et {
|
||||
// TODO(bwplotka): Consider changing parser to give metadata at once instead of type, help and unit in separation, ideally on `Series()/Histogram()
|
||||
// otherwise we can expose metadata without series on metadata API.
|
||||
case textparse.EntryType:
|
||||
sl.cache.setType(p.Type())
|
||||
// TODO(bwplotka): Build meta entry directly instead of locking and updating the map. This will
|
||||
// allow to properly update metadata when e.g unit was added, then removed;
|
||||
lastMFName, lastMeta = sl.cache.setType(p.Type())
|
||||
continue
|
||||
case textparse.EntryHelp:
|
||||
sl.cache.setHelp(p.Help())
|
||||
lastMFName, lastMeta = sl.cache.setHelp(p.Help())
|
||||
continue
|
||||
case textparse.EntryUnit:
|
||||
sl.cache.setUnit(p.Unit())
|
||||
lastMFName, lastMeta = sl.cache.setUnit(p.Unit())
|
||||
continue
|
||||
case textparse.EntryComment:
|
||||
continue
|
||||
|
@ -1699,26 +1691,19 @@ loop:
|
|||
t = *parsedTimestamp
|
||||
}
|
||||
|
||||
// Zero metadata out for current iteration until it's resolved.
|
||||
meta = metadata.Metadata{}
|
||||
metadataChanged = false
|
||||
|
||||
if sl.cache.getDropped(met) {
|
||||
continue
|
||||
}
|
||||
ce, ok, seriesAlreadyScraped := sl.cache.get(met)
|
||||
ce, seriesCached, seriesAlreadyScraped := sl.cache.get(met)
|
||||
var (
|
||||
ref storage.SeriesRef
|
||||
hash uint64
|
||||
)
|
||||
|
||||
if ok {
|
||||
if seriesCached {
|
||||
ref = ce.ref
|
||||
lset = ce.lset
|
||||
hash = ce.hash
|
||||
|
||||
// Update metadata only if it changed in the current iteration.
|
||||
updateMetadata(lset, false)
|
||||
} else {
|
||||
p.Metric(&lset)
|
||||
hash = lset.Hash()
|
||||
|
@ -1747,9 +1732,6 @@ loop:
|
|||
sl.metrics.targetScrapePoolExceededLabelLimits.Inc()
|
||||
break loop
|
||||
}
|
||||
|
||||
// Append metadata for new series if they were present.
|
||||
updateMetadata(lset, true)
|
||||
}
|
||||
|
||||
if seriesAlreadyScraped && parsedTimestamp == nil {
|
||||
|
@ -1799,7 +1781,7 @@ loop:
|
|||
break loop
|
||||
}
|
||||
|
||||
if !ok {
|
||||
if !seriesCached {
|
||||
if parsedTimestamp == nil || sl.trackTimestampsStaleness {
|
||||
// Bypass staleness logic if there is an explicit timestamp.
|
||||
sl.cache.trackStaleness(hash, lset)
|
||||
|
@ -1857,10 +1839,18 @@ loop:
|
|||
sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars))
|
||||
}
|
||||
|
||||
if sl.appendMetadataToWAL && metadataChanged {
|
||||
if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil {
|
||||
// No need to fail the scrape on errors appending metadata.
|
||||
sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr)
|
||||
if sl.appendMetadataToWAL && lastMeta != nil {
|
||||
// Is it new series OR did metadata change for this family?
|
||||
if !seriesCached || lastMeta.lastIterChange == sl.cache.iter {
|
||||
// In majority cases we can trust that the current series/histogram is matching the lastMeta and lastMFName.
|
||||
// However, optional TYPE etc metadata and broken OM text can break this, detect those cases here.
|
||||
// TODO(bwplotka): Consider moving this to parser as many parser users end up doing this (e.g. CT and NHCB parsing).
|
||||
if isSeriesPartOfFamily(lset.Get(labels.MetricName), lastMFName, lastMeta.Type) {
|
||||
if _, merr := app.UpdateMetadata(ref, lset, lastMeta.Metadata); merr != nil {
|
||||
// No need to fail the scrape on errors appending metadata.
|
||||
sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", lastMeta.Metadata), "err", merr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1896,6 +1886,71 @@ loop:
|
|||
return
|
||||
}
|
||||
|
||||
func isSeriesPartOfFamily(mName string, mfName []byte, typ model.MetricType) bool {
|
||||
mfNameStr := yoloString(mfName)
|
||||
if !strings.HasPrefix(mName, mfNameStr) { // Fast path.
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
gotMFName string
|
||||
ok bool
|
||||
)
|
||||
switch typ {
|
||||
case model.MetricTypeCounter:
|
||||
// Prometheus allows _total, cut it from mf name to support this case.
|
||||
mfNameStr, _ = strings.CutSuffix(mfNameStr, "_total")
|
||||
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_total")
|
||||
if !ok {
|
||||
gotMFName = mName
|
||||
}
|
||||
case model.MetricTypeHistogram:
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_bucket")
|
||||
if !ok {
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_sum")
|
||||
if !ok {
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_count")
|
||||
if !ok {
|
||||
gotMFName = mName
|
||||
}
|
||||
}
|
||||
}
|
||||
case model.MetricTypeGaugeHistogram:
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_bucket")
|
||||
if !ok {
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_gsum")
|
||||
if !ok {
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_gcount")
|
||||
if !ok {
|
||||
gotMFName = mName
|
||||
}
|
||||
}
|
||||
}
|
||||
case model.MetricTypeSummary:
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_sum")
|
||||
if !ok {
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_count")
|
||||
if !ok {
|
||||
gotMFName = mName
|
||||
}
|
||||
}
|
||||
case model.MetricTypeInfo:
|
||||
// Technically prometheus text does not support info type, but we might
|
||||
// accidentally allow info type in prom parse, so support metric family names
|
||||
// with the _info explicitly too.
|
||||
mfNameStr, _ = strings.CutSuffix(mfNameStr, "_info")
|
||||
|
||||
gotMFName, ok = strings.CutSuffix(mName, "_info")
|
||||
if !ok {
|
||||
gotMFName = mName
|
||||
}
|
||||
default:
|
||||
gotMFName = mName
|
||||
}
|
||||
return mfNameStr == gotMFName
|
||||
}
|
||||
|
||||
// Adds samples to the appender, checking the error, and then returns the # of samples added,
|
||||
// whether the caller should continue to process more samples, and any sample or bucket limit errors.
|
||||
func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
|
||||
|
@ -1934,17 +1989,80 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke
|
|||
}
|
||||
}
|
||||
|
||||
// reportSample represents automatically generated timeseries documented in
|
||||
// https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series
|
||||
type reportSample struct {
|
||||
metadata.Metadata
|
||||
name []byte
|
||||
}
|
||||
|
||||
// The constants are suffixed with the invalid \xff unicode rune to avoid collisions
|
||||
// with scraped metrics in the cache.
|
||||
var (
|
||||
scrapeHealthMetricName = []byte("up" + "\xff")
|
||||
scrapeDurationMetricName = []byte("scrape_duration_seconds" + "\xff")
|
||||
scrapeSamplesMetricName = []byte("scrape_samples_scraped" + "\xff")
|
||||
samplesPostRelabelMetricName = []byte("scrape_samples_post_metric_relabeling" + "\xff")
|
||||
scrapeSeriesAddedMetricName = []byte("scrape_series_added" + "\xff")
|
||||
scrapeTimeoutMetricName = []byte("scrape_timeout_seconds" + "\xff")
|
||||
scrapeSampleLimitMetricName = []byte("scrape_sample_limit" + "\xff")
|
||||
scrapeBodySizeBytesMetricName = []byte("scrape_body_size_bytes" + "\xff")
|
||||
scrapeHealthMetric = reportSample{
|
||||
name: []byte("up" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "Health of the scrape target. 1 means the target is healthy, 0 if the scrape failed.",
|
||||
Unit: "targets",
|
||||
},
|
||||
}
|
||||
scrapeDurationMetric = reportSample{
|
||||
name: []byte("scrape_duration_seconds" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "Duration of the last scrape in seconds.",
|
||||
Unit: "seconds",
|
||||
},
|
||||
}
|
||||
scrapeSamplesMetric = reportSample{
|
||||
name: []byte("scrape_samples_scraped" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "Number of samples last scraped.",
|
||||
Unit: "samples",
|
||||
},
|
||||
}
|
||||
samplesPostRelabelMetric = reportSample{
|
||||
name: []byte("scrape_samples_post_metric_relabeling" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "Number of samples remaining after metric relabeling was applied.",
|
||||
Unit: "samples",
|
||||
},
|
||||
}
|
||||
scrapeSeriesAddedMetric = reportSample{
|
||||
name: []byte("scrape_series_added" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "Number of series in the last scrape.",
|
||||
Unit: "series",
|
||||
},
|
||||
}
|
||||
scrapeTimeoutMetric = reportSample{
|
||||
name: []byte("scrape_timeout_seconds" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "The configured scrape timeout for a target.",
|
||||
Unit: "seconds",
|
||||
},
|
||||
}
|
||||
scrapeSampleLimitMetric = reportSample{
|
||||
name: []byte("scrape_sample_limit" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "The configured sample limit for a target. Returns zero if there is no limit configured.",
|
||||
Unit: "samples",
|
||||
},
|
||||
}
|
||||
scrapeBodySizeBytesMetric = reportSample{
|
||||
name: []byte("scrape_body_size_bytes" + "\xff"),
|
||||
Metadata: metadata.Metadata{
|
||||
Type: model.MetricTypeGauge,
|
||||
Help: "The uncompressed size of the last scrape response, if successful. Scrapes failing because body_size_limit is exceeded report -1, other scrape failures report 0.",
|
||||
Unit: "bytes",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration time.Duration, scraped, added, seriesAdded, bytes int, scrapeErr error) (err error) {
|
||||
|
@ -1958,29 +2076,29 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim
|
|||
}
|
||||
b := labels.NewBuilderWithSymbolTable(sl.symbolTable)
|
||||
|
||||
if err = sl.addReportSample(app, scrapeHealthMetricName, ts, health, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeHealthMetric, ts, health, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeDurationMetricName, ts, duration.Seconds(), b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeDurationMetric, ts, duration.Seconds(), b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, float64(scraped), b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeSamplesMetric, ts, float64(scraped), b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, float64(added), b); err != nil {
|
||||
if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, float64(added), b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, float64(seriesAdded), b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, float64(seriesAdded), b); err != nil {
|
||||
return
|
||||
}
|
||||
if sl.reportExtraMetrics {
|
||||
if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, sl.timeout.Seconds(), b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, sl.timeout.Seconds(), b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, float64(sl.sampleLimit), b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, float64(sl.sampleLimit), b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, float64(bytes), b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, float64(bytes), b); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -1993,37 +2111,37 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
|
|||
stale := math.Float64frombits(value.StaleNaN)
|
||||
b := labels.NewBuilder(labels.EmptyLabels())
|
||||
|
||||
if err = sl.addReportSample(app, scrapeHealthMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeHealthMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeDurationMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeDurationMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeSamplesMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeSamplesMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, samplesPostRelabelMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, samplesPostRelabelMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeSeriesAddedMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeSeriesAddedMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if sl.reportExtraMetrics {
|
||||
if err = sl.addReportSample(app, scrapeTimeoutMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeTimeoutMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeSampleLimitMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeSampleLimitMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = sl.addReportSample(app, scrapeBodySizeBytesMetricName, ts, stale, b); err != nil {
|
||||
if err = sl.addReportSample(app, scrapeBodySizeBytesMetric, ts, stale, b); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error {
|
||||
ce, ok, _ := sl.cache.get(s)
|
||||
func (sl *scrapeLoop) addReportSample(app storage.Appender, s reportSample, t int64, v float64, b *labels.Builder) error {
|
||||
ce, ok, _ := sl.cache.get(s.name)
|
||||
var ref storage.SeriesRef
|
||||
var lset labels.Labels
|
||||
if ok {
|
||||
|
@ -2034,7 +2152,7 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v
|
|||
// with scraped metrics in the cache.
|
||||
// We have to drop it when building the actual metric.
|
||||
b.Reset(labels.EmptyLabels())
|
||||
b.Set(labels.MetricName, string(s[:len(s)-1]))
|
||||
b.Set(labels.MetricName, string(s.name[:len(s.name)-1]))
|
||||
lset = sl.reportSampleMutator(b.Labels())
|
||||
}
|
||||
|
||||
|
@ -2042,7 +2160,13 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v
|
|||
switch {
|
||||
case err == nil:
|
||||
if !ok {
|
||||
sl.cache.addRef(s, ref, lset, lset.Hash())
|
||||
sl.cache.addRef(s.name, ref, lset, lset.Hash())
|
||||
// We only need to add metadata once a scrape target appears.
|
||||
if sl.appendMetadataToWAL {
|
||||
if _, merr := app.UpdateMetadata(ref, lset, s.Metadata); merr != nil {
|
||||
sl.l.Debug("Error when appending metadata in addReportSample", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", s.Metadata), "err", merr)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp):
|
||||
|
|
|
@ -39,6 +39,7 @@ import (
|
|||
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
config_util "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -49,6 +50,7 @@ import (
|
|||
"github.com/prometheus/prometheus/model/exemplar"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/metadata"
|
||||
"github.com/prometheus/prometheus/model/relabel"
|
||||
"github.com/prometheus/prometheus/model/textparse"
|
||||
"github.com/prometheus/prometheus/model/timestamp"
|
||||
|
@ -95,7 +97,9 @@ func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) {
|
|||
// Test with default OutOfOrderTimeWindow (0)
|
||||
t.Run("Out-Of-Order Sample Disabled", func(t *testing.T) {
|
||||
s := teststorage.New(t)
|
||||
defer s.Close()
|
||||
t.Cleanup(func() {
|
||||
_ = s.Close()
|
||||
})
|
||||
|
||||
runScrapeLoopTest(t, s, false)
|
||||
})
|
||||
|
@ -103,7 +107,9 @@ func TestStorageHandlesOutOfOrderTimestamps(t *testing.T) {
|
|||
// Test with specific OutOfOrderTimeWindow (600000)
|
||||
t.Run("Out-Of-Order Sample Enabled", func(t *testing.T) {
|
||||
s := teststorage.New(t, 600000)
|
||||
defer s.Close()
|
||||
t.Cleanup(func() {
|
||||
_ = s.Close()
|
||||
})
|
||||
|
||||
runScrapeLoopTest(t, s, true)
|
||||
})
|
||||
|
@ -125,13 +131,13 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
|
|||
timestampInorder2 := now.Add(5 * time.Minute)
|
||||
|
||||
slApp := sl.appender(context.Background())
|
||||
_, _, _, err := sl.append(slApp, []byte(`metric_a{a="1",b="1"} 1`), "text/plain", timestampInorder1)
|
||||
_, _, _, err := sl.append(slApp, []byte(`metric_total{a="1",b="1"} 1`), "text/plain", timestampInorder1)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder)
|
||||
_, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 2`), "text/plain", timestampOutOfOrder)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, _, err = sl.append(slApp, []byte(`metric_a{a="1",b="1"} 3`), "text/plain", timestampInorder2)
|
||||
_, _, _, err = sl.append(slApp, []byte(`metric_total{a="1",b="1"} 3`), "text/plain", timestampInorder2)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, slApp.Commit())
|
||||
|
@ -144,7 +150,7 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
|
|||
defer q.Close()
|
||||
|
||||
// Use a matcher to filter the metric name.
|
||||
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_a"))
|
||||
series := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "__name__", "metric_total"))
|
||||
|
||||
var results []floatSample
|
||||
for series.Next() {
|
||||
|
@ -164,12 +170,12 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
|
|||
// Define the expected results
|
||||
want := []floatSample{
|
||||
{
|
||||
metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
|
||||
metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"),
|
||||
t: timestamp.FromTime(timestampInorder1),
|
||||
f: 1,
|
||||
},
|
||||
{
|
||||
metric: labels.FromStrings("__name__", "metric_a", "a", "1", "b", "1"),
|
||||
metric: labels.FromStrings("__name__", "metric_total", "a", "1", "b", "1"),
|
||||
t: timestamp.FromTime(timestampInorder2),
|
||||
f: 3,
|
||||
},
|
||||
|
@ -182,6 +188,134 @@ func runScrapeLoopTest(t *testing.T, s *teststorage.TestStorage, expectOutOfOrde
|
|||
}
|
||||
}
|
||||
|
||||
// Regression test against https://github.com/prometheus/prometheus/issues/15831.
|
||||
func TestScrapeAppendMetadataUpdate(t *testing.T) {
|
||||
const (
|
||||
scrape1 = `# TYPE test_metric counter
|
||||
# HELP test_metric some help text
|
||||
# UNIT test_metric metric
|
||||
test_metric_total 1
|
||||
# TYPE test_metric2 gauge
|
||||
# HELP test_metric2 other help text
|
||||
test_metric2{foo="bar"} 2
|
||||
# TYPE test_metric3 gauge
|
||||
# HELP test_metric3 this represents tricky case of "broken" text that is not trivial to detect
|
||||
test_metric3_metric4{foo="bar"} 2
|
||||
# EOF`
|
||||
scrape2 = `# TYPE test_metric counter
|
||||
# HELP test_metric different help text
|
||||
test_metric_total 11
|
||||
# TYPE test_metric2 gauge
|
||||
# HELP test_metric2 other help text
|
||||
# UNIT test_metric2 metric2
|
||||
test_metric2{foo="bar"} 22
|
||||
# EOF`
|
||||
)
|
||||
|
||||
// Create an appender for adding samples to the storage.
|
||||
capp := &collectResultAppender{next: nopAppender{}}
|
||||
sl := newBasicScrapeLoop(t, context.Background(), nil, func(ctx context.Context) storage.Appender { return capp }, 0)
|
||||
|
||||
now := time.Now()
|
||||
slApp := sl.appender(context.Background())
|
||||
_, _, _, err := sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, slApp.Commit())
|
||||
testutil.RequireEqualWithOptions(t, []metadataEntry{
|
||||
{metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "some help text"}},
|
||||
{metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "", Help: "other help text"}},
|
||||
}, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
|
||||
capp.resultMetadata = nil
|
||||
|
||||
// Next (the same) scrape should not add new metadata entries.
|
||||
slApp = sl.appender(context.Background())
|
||||
_, _, _, err = sl.append(slApp, []byte(scrape1), "application/openmetrics-text", now.Add(15*time.Second))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, slApp.Commit())
|
||||
testutil.RequireEqualWithOptions(t, []metadataEntry(nil), capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
|
||||
|
||||
slApp = sl.appender(context.Background())
|
||||
_, _, _, err = sl.append(slApp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, slApp.Commit())
|
||||
testutil.RequireEqualWithOptions(t, []metadataEntry{
|
||||
{metric: labels.FromStrings("__name__", "test_metric_total"), m: metadata.Metadata{Type: "counter", Unit: "metric", Help: "different help text"}}, // Here, technically we should have no unit, but it's a known limitation of the current implementation.
|
||||
{metric: labels.FromStrings("__name__", "test_metric2", "foo", "bar"), m: metadata.Metadata{Type: "gauge", Unit: "metric2", Help: "other help text"}},
|
||||
}, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
|
||||
}
|
||||
|
||||
type nopScraper struct {
|
||||
scraper
|
||||
}
|
||||
|
||||
func (n nopScraper) Report(start time.Time, dur time.Duration, err error) {}
|
||||
|
||||
func TestScrapeReportMetadataUpdate(t *testing.T) {
|
||||
// Create an appender for adding samples to the storage.
|
||||
capp := &collectResultAppender{next: nopAppender{}}
|
||||
sl := newBasicScrapeLoop(t, context.Background(), nopScraper{}, func(ctx context.Context) storage.Appender { return capp }, 0)
|
||||
now := time.Now()
|
||||
slApp := sl.appender(context.Background())
|
||||
|
||||
require.NoError(t, sl.report(slApp, now, 2*time.Second, 1, 1, 1, 512, nil))
|
||||
require.NoError(t, slApp.Commit())
|
||||
testutil.RequireEqualWithOptions(t, []metadataEntry{
|
||||
{metric: labels.FromStrings("__name__", "up"), m: scrapeHealthMetric.Metadata},
|
||||
{metric: labels.FromStrings("__name__", "scrape_duration_seconds"), m: scrapeDurationMetric.Metadata},
|
||||
{metric: labels.FromStrings("__name__", "scrape_samples_scraped"), m: scrapeSamplesMetric.Metadata},
|
||||
{metric: labels.FromStrings("__name__", "scrape_samples_post_metric_relabeling"), m: samplesPostRelabelMetric.Metadata},
|
||||
{metric: labels.FromStrings("__name__", "scrape_series_added"), m: scrapeSeriesAddedMetric.Metadata},
|
||||
}, capp.resultMetadata, []cmp.Option{cmp.Comparer(metadataEntryEqual)})
|
||||
}
|
||||
|
||||
func TestIsSeriesPartOfFamily(t *testing.T) {
|
||||
t.Run("counter", func(t *testing.T) {
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests_total"), model.MetricTypeCounter)) // Prometheus text style.
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests"), model.MetricTypeCounter)) // OM text style.
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests_total"), model.MetricTypeUnknown))
|
||||
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_total", []byte("http_requests"), model.MetricTypeUnknown)) // We don't know.
|
||||
require.False(t, isSeriesPartOfFamily("http_requests2_total", []byte("http_requests_total"), model.MetricTypeCounter))
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_requests_total", []byte("http_requests"), model.MetricTypeCounter))
|
||||
})
|
||||
|
||||
t.Run("gauge", func(t *testing.T) {
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_count", []byte("http_requests_count"), model.MetricTypeGauge))
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_count", []byte("http_requests_count"), model.MetricTypeUnknown))
|
||||
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_count2", []byte("http_requests_count"), model.MetricTypeCounter))
|
||||
})
|
||||
|
||||
t.Run("histogram", func(t *testing.T) {
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeHistogram))
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds_count", []byte("http_requests_seconds"), model.MetricTypeHistogram))
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds_bucket", []byte("http_requests_seconds"), model.MetricTypeHistogram))
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds", []byte("http_requests_seconds"), model.MetricTypeHistogram))
|
||||
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeUnknown)) // We don't know.
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_seconds2_sum", []byte("http_requests_seconds"), model.MetricTypeHistogram))
|
||||
})
|
||||
|
||||
t.Run("summary", func(t *testing.T) {
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeSummary))
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds_count", []byte("http_requests_seconds"), model.MetricTypeSummary))
|
||||
require.True(t, isSeriesPartOfFamily("http_requests_seconds", []byte("http_requests_seconds"), model.MetricTypeSummary))
|
||||
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_seconds_sum", []byte("http_requests_seconds"), model.MetricTypeUnknown)) // We don't know.
|
||||
require.False(t, isSeriesPartOfFamily("http_requests_seconds2_sum", []byte("http_requests_seconds"), model.MetricTypeSummary))
|
||||
})
|
||||
|
||||
t.Run("info", func(t *testing.T) {
|
||||
require.True(t, isSeriesPartOfFamily("go_build_info", []byte("go_build_info"), model.MetricTypeInfo)) // Prometheus text style.
|
||||
require.True(t, isSeriesPartOfFamily("go_build_info", []byte("go_build"), model.MetricTypeInfo)) // OM text style.
|
||||
require.True(t, isSeriesPartOfFamily("go_build_info", []byte("go_build_info"), model.MetricTypeUnknown))
|
||||
|
||||
require.False(t, isSeriesPartOfFamily("go_build_info", []byte("go_build"), model.MetricTypeUnknown)) // We don't know.
|
||||
require.False(t, isSeriesPartOfFamily("go_build2_info", []byte("go_build_info"), model.MetricTypeInfo))
|
||||
require.False(t, isSeriesPartOfFamily("go_build_build_info", []byte("go_build_info"), model.MetricTypeInfo))
|
||||
})
|
||||
}
|
||||
|
||||
func TestDroppedTargetsList(t *testing.T) {
|
||||
var (
|
||||
app = &nopAppendable{}
|
||||
|
@ -823,7 +957,7 @@ func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper s
|
|||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
true,
|
||||
nil,
|
||||
false,
|
||||
newTestScrapeMetrics(t),
|
||||
|
@ -1130,7 +1264,7 @@ func TestScrapeLoopMetadata(t *testing.T) {
|
|||
total, _, _, err := sl.append(slApp, []byte(`# TYPE test_metric counter
|
||||
# HELP test_metric some help text
|
||||
# UNIT test_metric metric
|
||||
test_metric 1
|
||||
test_metric_total 1
|
||||
# TYPE test_metric_no_help gauge
|
||||
# HELP test_metric_no_type other help text
|
||||
# EOF`), "application/openmetrics-text", time.Now())
|
||||
|
@ -1256,42 +1390,73 @@ func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) {
|
|||
func makeTestMetrics(n int) []byte {
|
||||
// Construct a metrics string to parse
|
||||
sb := bytes.Buffer{}
|
||||
fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
|
||||
fmt.Fprintf(&sb, "# HELP metric_a help text\n")
|
||||
for i := 0; i < n; i++ {
|
||||
fmt.Fprintf(&sb, "# TYPE metric_a gauge\n")
|
||||
fmt.Fprintf(&sb, "# HELP metric_a help text\n")
|
||||
fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
|
||||
}
|
||||
fmt.Fprintf(&sb, "# EOF\n")
|
||||
return sb.Bytes()
|
||||
}
|
||||
|
||||
func BenchmarkScrapeLoopAppend(b *testing.B) {
|
||||
ctx, sl := simpleTestScrapeLoop(b)
|
||||
func promTextToProto(tb testing.TB, text []byte) []byte {
|
||||
tb.Helper()
|
||||
|
||||
slApp := sl.appender(ctx)
|
||||
metrics := makeTestMetrics(100)
|
||||
ts := time.Time{}
|
||||
d := expfmt.NewDecoder(bytes.NewReader(text), expfmt.TextVersion)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ts = ts.Add(time.Second)
|
||||
_, _, _, _ = sl.append(slApp, metrics, "text/plain", ts)
|
||||
pb := &dto.MetricFamily{}
|
||||
if err := d.Decode(pb); err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
o, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
buf := bytes.Buffer{}
|
||||
// Write first length, then binary protobuf.
|
||||
varintBuf := binary.AppendUvarint(nil, uint64(len(o)))
|
||||
buf.Write(varintBuf)
|
||||
buf.Write(o)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func BenchmarkScrapeLoopAppendOM(b *testing.B) {
|
||||
ctx, sl := simpleTestScrapeLoop(b)
|
||||
/*
|
||||
export bench=scrape-loop-v1 && go test \
|
||||
-run '^$' -bench '^BenchmarkScrapeLoopAppend' \
|
||||
-benchtime 5s -count 6 -cpu 2 -timeout 999m \
|
||||
| tee ${bench}.txt
|
||||
*/
|
||||
func BenchmarkScrapeLoopAppend(b *testing.B) {
|
||||
metricsText := makeTestMetrics(100)
|
||||
|
||||
slApp := sl.appender(ctx)
|
||||
metrics := makeTestMetrics(100)
|
||||
ts := time.Time{}
|
||||
// Create proto representation.
|
||||
metricsProto := promTextToProto(b, metricsText)
|
||||
|
||||
b.ResetTimer()
|
||||
for _, bcase := range []struct {
|
||||
name string
|
||||
contentType string
|
||||
parsable []byte
|
||||
}{
|
||||
{name: "PromText", contentType: "text/plain", parsable: metricsText},
|
||||
{name: "OMText", contentType: "application/openmetrics-text", parsable: metricsText},
|
||||
{name: "PromProto", contentType: "application/vnd.google.protobuf", parsable: metricsProto},
|
||||
} {
|
||||
b.Run(fmt.Sprintf("fmt=%v", bcase.name), func(b *testing.B) {
|
||||
ctx, sl := simpleTestScrapeLoop(b)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ts = ts.Add(time.Second)
|
||||
_, _, _, _ = sl.append(slApp, metrics, "application/openmetrics-text", ts)
|
||||
slApp := sl.appender(ctx)
|
||||
ts := time.Time{}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ts = ts.Add(time.Second)
|
||||
_, _, _, err := sl.append(slApp, bcase.parsable, bcase.contentType, ts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2454,18 +2619,7 @@ metric: <
|
|||
|
||||
buf := &bytes.Buffer{}
|
||||
if test.contentType == "application/vnd.google.protobuf" {
|
||||
// In case of protobuf, we have to create the binary representation.
|
||||
pb := &dto.MetricFamily{}
|
||||
// From text to proto message.
|
||||
require.NoError(t, proto.UnmarshalText(test.scrapeText, pb))
|
||||
// From proto message to binary protobuf.
|
||||
protoBuf, err := proto.Marshal(pb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Write first length, then binary protobuf.
|
||||
varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf)))
|
||||
buf.Write(varintBuf)
|
||||
buf.Write(protoBuf)
|
||||
require.NoError(t, textToProto(test.scrapeText, buf))
|
||||
} else {
|
||||
buf.WriteString(test.scrapeText)
|
||||
}
|
||||
|
@ -2480,6 +2634,26 @@ metric: <
|
|||
}
|
||||
}
|
||||
|
||||
func textToProto(text string, buf *bytes.Buffer) error {
|
||||
// In case of protobuf, we have to create the binary representation.
|
||||
pb := &dto.MetricFamily{}
|
||||
// From text to proto message.
|
||||
err := proto.UnmarshalText(text, pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// From proto message to binary protobuf.
|
||||
protoBuf, err := proto.Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Write first length, then binary protobuf.
|
||||
varintBuf := binary.AppendUvarint(nil, uint64(len(protoBuf)))
|
||||
buf.Write(varintBuf)
|
||||
buf.Write(protoBuf)
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestScrapeLoopAppendExemplarSeries(t *testing.T) {
|
||||
scrapeText := []string{`metric_total{n="1"} 1 # {t="1"} 1.0 10000
|
||||
# EOF`, `metric_total{n="1"} 2 # {t="2"} 2.0 20000
|
||||
|
|
|
@ -78,17 +78,17 @@ func (t *Target) String() string {
|
|||
// MetricMetadataStore represents a storage for metadata.
|
||||
type MetricMetadataStore interface {
|
||||
ListMetadata() []MetricMetadata
|
||||
GetMetadata(metric string) (MetricMetadata, bool)
|
||||
GetMetadata(mfName string) (MetricMetadata, bool)
|
||||
SizeMetadata() int
|
||||
LengthMetadata() int
|
||||
}
|
||||
|
||||
// MetricMetadata is a piece of metadata for a metric.
|
||||
// MetricMetadata is a piece of metadata for a metric family.
|
||||
type MetricMetadata struct {
|
||||
Metric string
|
||||
Type model.MetricType
|
||||
Help string
|
||||
Unit string
|
||||
MetricFamily string
|
||||
Type model.MetricType
|
||||
Help string
|
||||
Unit string
|
||||
}
|
||||
|
||||
func (t *Target) ListMetadata() []MetricMetadata {
|
||||
|
@ -124,14 +124,14 @@ func (t *Target) LengthMetadata() int {
|
|||
}
|
||||
|
||||
// GetMetadata returns type and help metadata for the given metric.
|
||||
func (t *Target) GetMetadata(metric string) (MetricMetadata, bool) {
|
||||
func (t *Target) GetMetadata(mfName string) (MetricMetadata, bool) {
|
||||
t.mtx.RLock()
|
||||
defer t.mtx.RUnlock()
|
||||
|
||||
if t.metadata == nil {
|
||||
return MetricMetadata{}, false
|
||||
}
|
||||
return t.metadata.GetMetadata(metric)
|
||||
return t.metadata.GetMetadata(mfName)
|
||||
}
|
||||
|
||||
func (t *Target) SetMetadataStore(s MetricMetadataStore) {
|
||||
|
@ -295,12 +295,12 @@ func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Durati
|
|||
intervalLabel := t.labels.Get(model.ScrapeIntervalLabel)
|
||||
interval, err := model.ParseDuration(intervalLabel)
|
||||
if err != nil {
|
||||
return defaultInterval, defaultDuration, fmt.Errorf("Error parsing interval label %q: %w", intervalLabel, err)
|
||||
return defaultInterval, defaultDuration, fmt.Errorf("error parsing interval label %q: %w", intervalLabel, err)
|
||||
}
|
||||
timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel)
|
||||
timeout, err := model.ParseDuration(timeoutLabel)
|
||||
if err != nil {
|
||||
return defaultInterval, defaultDuration, fmt.Errorf("Error parsing timeout label %q: %w", timeoutLabel, err)
|
||||
return defaultInterval, defaultDuration, fmt.Errorf("error parsing timeout label %q: %w", timeoutLabel, err)
|
||||
}
|
||||
|
||||
return time.Duration(interval), time.Duration(timeout), nil
|
||||
|
|
|
@ -36,4 +36,4 @@ jobs:
|
|||
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
|
||||
with:
|
||||
args: --verbose
|
||||
version: v1.62.0
|
||||
version: v1.63.4
|
||||
|
|
|
@ -81,8 +81,8 @@ var (
|
|||
remoteReadQueriesTotal = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "read_queries_total",
|
||||
Subsystem: "remote_read_client",
|
||||
Name: "queries_total",
|
||||
Help: "The total number of remote read queries.",
|
||||
},
|
||||
[]string{remoteName, endpoint, "response_type", "code"},
|
||||
|
@ -90,8 +90,8 @@ var (
|
|||
remoteReadQueries = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "remote_read_queries",
|
||||
Subsystem: "remote_read_client",
|
||||
Name: "queries",
|
||||
Help: "The number of in-flight remote read queries.",
|
||||
},
|
||||
[]string{remoteName, endpoint},
|
||||
|
@ -99,8 +99,8 @@ var (
|
|||
remoteReadQueryDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "read_request_duration_seconds",
|
||||
Subsystem: "remote_read_client",
|
||||
Name: "request_duration_seconds",
|
||||
Help: "Histogram of the latency for remote read requests. Note that for streamed responses this is only the duration of the initial call and does not include the processing of the stream.",
|
||||
Buckets: append(prometheus.DefBuckets, 25, 60),
|
||||
NativeHistogramBucketFactor: 1.1,
|
||||
|
|
|
@ -38,7 +38,7 @@ type Watchable interface {
|
|||
type noopScrapeManager struct{}
|
||||
|
||||
func (noop *noopScrapeManager) Get() (*scrape.Manager, error) {
|
||||
return nil, errors.New("Scrape manager not ready")
|
||||
return nil, errors.New("scrape manager not ready")
|
||||
}
|
||||
|
||||
// MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo.
|
||||
|
|
|
@ -40,9 +40,9 @@ func (s *TestMetaStore) ListMetadata() []scrape.MetricMetadata {
|
|||
return s.Metadata
|
||||
}
|
||||
|
||||
func (s *TestMetaStore) GetMetadata(metric string) (scrape.MetricMetadata, bool) {
|
||||
func (s *TestMetaStore) GetMetadata(mfName string) (scrape.MetricMetadata, bool) {
|
||||
for _, m := range s.Metadata {
|
||||
if metric == m.Metric {
|
||||
if mfName == m.MetricFamily {
|
||||
return m, true
|
||||
}
|
||||
}
|
||||
|
@ -106,26 +106,26 @@ func TestWatchScrapeManager_ReadyForCollection(t *testing.T) {
|
|||
metadata := &TestMetaStore{
|
||||
Metadata: []scrape.MetricMetadata{
|
||||
{
|
||||
Metric: "prometheus_tsdb_head_chunks_created_total",
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Total number",
|
||||
Unit: "",
|
||||
MetricFamily: "prometheus_tsdb_head_chunks_created",
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Total number",
|
||||
Unit: "",
|
||||
},
|
||||
{
|
||||
Metric: "prometheus_remote_storage_retried_samples_total",
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Total number",
|
||||
Unit: "",
|
||||
MetricFamily: "prometheus_remote_storage_retried_samples",
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Total number",
|
||||
Unit: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
metadataDup := &TestMetaStore{
|
||||
Metadata: []scrape.MetricMetadata{
|
||||
{
|
||||
Metric: "prometheus_tsdb_head_chunks_created_total",
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Total number",
|
||||
Unit: "",
|
||||
MetricFamily: "prometheus_tsdb_head_chunks_created",
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "Total number",
|
||||
Unit: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -1,106 +0,0 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/golang/go/blob/f2d118fd5f7e872804a5825ce29797f81a28b0fa/src/strings/strings.go
|
||||
// Provenance-includes-license: BSD-3-Clause
|
||||
// Provenance-includes-copyright: Copyright The Go Authors.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "strings"
|
||||
|
||||
// fieldsFunc is a copy of strings.FieldsFunc from the Go standard library,
|
||||
// but it also returns the separators as part of the result.
|
||||
func fieldsFunc(s string, f func(rune) bool) ([]string, []string) {
|
||||
// A span is used to record a slice of s of the form s[start:end].
|
||||
// The start index is inclusive and the end index is exclusive.
|
||||
type span struct {
|
||||
start int
|
||||
end int
|
||||
}
|
||||
spans := make([]span, 0, 32)
|
||||
separators := make([]string, 0, 32)
|
||||
|
||||
// Find the field start and end indices.
|
||||
// Doing this in a separate pass (rather than slicing the string s
|
||||
// and collecting the result substrings right away) is significantly
|
||||
// more efficient, possibly due to cache effects.
|
||||
start := -1 // valid span start if >= 0
|
||||
for end, rune := range s {
|
||||
if f(rune) {
|
||||
if start >= 0 {
|
||||
spans = append(spans, span{start, end})
|
||||
// Set start to a negative value.
|
||||
// Note: using -1 here consistently and reproducibly
|
||||
// slows down this code by a several percent on amd64.
|
||||
start = ^start
|
||||
separators = append(separators, string(s[end]))
|
||||
}
|
||||
} else {
|
||||
if start < 0 {
|
||||
start = end
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Last field might end at EOF.
|
||||
if start >= 0 {
|
||||
spans = append(spans, span{start, len(s)})
|
||||
}
|
||||
|
||||
// Create strings from recorded field indices.
|
||||
a := make([]string, len(spans))
|
||||
for i, span := range spans {
|
||||
a[i] = s[span.start:span.end]
|
||||
}
|
||||
|
||||
return a, separators
|
||||
}
|
||||
|
||||
// join is a copy of strings.Join from the Go standard library,
|
||||
// but it also accepts a slice of separators to join the elements with.
|
||||
// If the slice of separators is shorter than the slice of elements, use a default value.
|
||||
// We also don't check for integer overflow.
|
||||
func join(elems []string, separators []string, def string) string {
|
||||
switch len(elems) {
|
||||
case 0:
|
||||
return ""
|
||||
case 1:
|
||||
return elems[0]
|
||||
}
|
||||
|
||||
var n int
|
||||
var sep string
|
||||
sepLen := len(separators)
|
||||
for i, elem := range elems {
|
||||
if i >= sepLen {
|
||||
sep = def
|
||||
} else {
|
||||
sep = separators[i]
|
||||
}
|
||||
n += len(sep) + len(elem)
|
||||
}
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(n)
|
||||
b.WriteString(elems[0])
|
||||
for i, s := range elems[1:] {
|
||||
if i >= sepLen {
|
||||
sep = def
|
||||
} else {
|
||||
sep = separators[i]
|
||||
}
|
||||
b.WriteString(sep)
|
||||
b.WriteString(s)
|
||||
}
|
||||
return b.String()
|
||||
}
|
|
@ -78,7 +78,7 @@ var perUnitMap = map[string]string{
|
|||
"y": "year",
|
||||
}
|
||||
|
||||
// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric.
|
||||
// BuildCompliantMetricName builds a Prometheus-compliant metric name for the specified metric.
|
||||
//
|
||||
// Metric name is prefixed with specified namespace and underscore (if any).
|
||||
// Namespace is not cleaned up. Make sure specified namespace follows Prometheus
|
||||
|
@ -87,29 +87,24 @@ var perUnitMap = map[string]string{
|
|||
// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels,
|
||||
// https://prometheus.io/docs/practices/naming/#metric-and-label-naming
|
||||
// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
|
||||
func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes, allowUTF8 bool) string {
|
||||
func BuildCompliantMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
|
||||
// Full normalization following standard Prometheus naming conventions
|
||||
if addMetricSuffixes {
|
||||
return normalizeName(metric, namespace, allowUTF8)
|
||||
return normalizeName(metric, namespace)
|
||||
}
|
||||
|
||||
var metricName string
|
||||
if !allowUTF8 {
|
||||
// Simple case (no full normalization, no units, etc.).
|
||||
metricName = strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool {
|
||||
return invalidMetricCharRE.MatchString(string(r))
|
||||
}), "_")
|
||||
} else {
|
||||
metricName = metric.Name()
|
||||
}
|
||||
// Simple case (no full normalization, no units, etc.).
|
||||
metricName := strings.Join(strings.FieldsFunc(metric.Name(), func(r rune) bool {
|
||||
return invalidMetricCharRE.MatchString(string(r))
|
||||
}), "_")
|
||||
|
||||
// Namespace?
|
||||
if namespace != "" {
|
||||
return namespace + "_" + metricName
|
||||
}
|
||||
|
||||
// Metric name starts with a digit and utf8 not allowed? Prefix it with an underscore.
|
||||
if metricName != "" && unicode.IsDigit(rune(metricName[0])) && !allowUTF8 {
|
||||
// Metric name starts with a digit? Prefix it with an underscore.
|
||||
if metricName != "" && unicode.IsDigit(rune(metricName[0])) {
|
||||
metricName = "_" + metricName
|
||||
}
|
||||
|
||||
|
@ -124,70 +119,17 @@ var (
|
|||
)
|
||||
|
||||
// Build a normalized name for the specified metric.
|
||||
func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) string {
|
||||
var nameTokens []string
|
||||
var separators []string
|
||||
if !allowUTF8 {
|
||||
// Split metric name into "tokens" (of supported metric name runes).
|
||||
// Note that this has the side effect of replacing multiple consecutive underscores with a single underscore.
|
||||
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
|
||||
nameTokens = strings.FieldsFunc(
|
||||
metric.Name(),
|
||||
func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) },
|
||||
)
|
||||
} else {
|
||||
translationFunc := func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != ':' }
|
||||
// Split metric name into "tokens" (of supported metric name runes).
|
||||
nameTokens, separators = fieldsFunc(metric.Name(), translationFunc)
|
||||
}
|
||||
func normalizeName(metric pmetric.Metric, namespace string) string {
|
||||
// Split metric name into "tokens" (of supported metric name runes).
|
||||
// Note that this has the side effect of replacing multiple consecutive underscores with a single underscore.
|
||||
// This is part of the OTel to Prometheus specification: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.38.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus.
|
||||
nameTokens := strings.FieldsFunc(
|
||||
metric.Name(),
|
||||
func(r rune) bool { return nonMetricNameCharRE.MatchString(string(r)) },
|
||||
)
|
||||
|
||||
// Split unit at the '/' if any
|
||||
unitTokens := strings.SplitN(metric.Unit(), "/", 2)
|
||||
|
||||
// Main unit
|
||||
// Append if not blank, doesn't contain '{}', and is not present in metric name already
|
||||
if len(unitTokens) > 0 {
|
||||
var mainUnitProm, perUnitProm string
|
||||
mainUnitOTel := strings.TrimSpace(unitTokens[0])
|
||||
if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") {
|
||||
mainUnitProm = unitMapGetOrDefault(mainUnitOTel)
|
||||
if !allowUTF8 {
|
||||
mainUnitProm = cleanUpUnit(mainUnitProm)
|
||||
}
|
||||
if slices.Contains(nameTokens, mainUnitProm) {
|
||||
mainUnitProm = ""
|
||||
}
|
||||
}
|
||||
|
||||
// Per unit
|
||||
// Append if not blank, doesn't contain '{}', and is not present in metric name already
|
||||
if len(unitTokens) > 1 && unitTokens[1] != "" {
|
||||
perUnitOTel := strings.TrimSpace(unitTokens[1])
|
||||
if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") {
|
||||
perUnitProm = perUnitMapGetOrDefault(perUnitOTel)
|
||||
if !allowUTF8 {
|
||||
perUnitProm = cleanUpUnit(perUnitProm)
|
||||
}
|
||||
}
|
||||
if perUnitProm != "" {
|
||||
perUnitProm = "per_" + perUnitProm
|
||||
if slices.Contains(nameTokens, perUnitProm) {
|
||||
perUnitProm = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if perUnitProm != "" {
|
||||
mainUnitProm = strings.TrimSuffix(mainUnitProm, "_")
|
||||
}
|
||||
|
||||
if mainUnitProm != "" {
|
||||
nameTokens = append(nameTokens, mainUnitProm)
|
||||
}
|
||||
if perUnitProm != "" {
|
||||
nameTokens = append(nameTokens, perUnitProm)
|
||||
}
|
||||
}
|
||||
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit())
|
||||
nameTokens = addUnitTokens(nameTokens, cleanUpUnit(mainUnitSuffix), cleanUpUnit(perUnitSuffix))
|
||||
|
||||
// Append _total for Counters
|
||||
if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() {
|
||||
|
@ -208,14 +150,8 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri
|
|||
nameTokens = append([]string{namespace}, nameTokens...)
|
||||
}
|
||||
|
||||
var normalizedName string
|
||||
if !allowUTF8 {
|
||||
// Build the string from the tokens, separated with underscores
|
||||
normalizedName = strings.Join(nameTokens, "_")
|
||||
} else {
|
||||
// Build the string from the tokens + separators.
|
||||
normalizedName = join(nameTokens, separators, "_")
|
||||
}
|
||||
// Build the string from the tokens, separated with underscores
|
||||
normalizedName := strings.Join(nameTokens, "_")
|
||||
|
||||
// Metric name cannot start with a digit, so prefix it with "_" in this case
|
||||
if normalizedName != "" && unicode.IsDigit(rune(normalizedName[0])) {
|
||||
|
@ -225,6 +161,39 @@ func normalizeName(metric pmetric.Metric, namespace string, allowUTF8 bool) stri
|
|||
return normalizedName
|
||||
}
|
||||
|
||||
// addUnitTokens will add the suffixes to the nameTokens if they are not already present.
|
||||
// It will also remove trailing underscores from the main suffix to avoid double underscores
|
||||
// when joining the tokens.
|
||||
//
|
||||
// If the 'per' unit ends with underscore, the underscore will be removed. If the per unit is just
|
||||
// 'per_', it will be entirely removed.
|
||||
func addUnitTokens(nameTokens []string, mainUnitSuffix, perUnitSuffix string) []string {
|
||||
if slices.Contains(nameTokens, mainUnitSuffix) {
|
||||
mainUnitSuffix = ""
|
||||
}
|
||||
|
||||
if perUnitSuffix == "per_" {
|
||||
perUnitSuffix = ""
|
||||
} else {
|
||||
perUnitSuffix = strings.TrimSuffix(perUnitSuffix, "_")
|
||||
if slices.Contains(nameTokens, perUnitSuffix) {
|
||||
perUnitSuffix = ""
|
||||
}
|
||||
}
|
||||
|
||||
if perUnitSuffix != "" {
|
||||
mainUnitSuffix = strings.TrimSuffix(mainUnitSuffix, "_")
|
||||
}
|
||||
|
||||
if mainUnitSuffix != "" {
|
||||
nameTokens = append(nameTokens, mainUnitSuffix)
|
||||
}
|
||||
if perUnitSuffix != "" {
|
||||
nameTokens = append(nameTokens, perUnitSuffix)
|
||||
}
|
||||
return nameTokens
|
||||
}
|
||||
|
||||
// cleanUpUnit cleans up unit so it matches model.LabelNameRE.
|
||||
func cleanUpUnit(unit string) string {
|
||||
// Multiple consecutive underscores are replaced with a single underscore.
|
||||
|
@ -263,3 +232,75 @@ func removeItem(slice []string, value string) []string {
|
|||
}
|
||||
return newSlice
|
||||
}
|
||||
|
||||
// BuildMetricName builds a valid metric name but without following Prometheus naming conventions.
|
||||
// It doesn't do any character transformation, it only prefixes the metric name with the namespace, if any,
|
||||
// and adds metric type suffixes, e.g. "_total" for counters and unit suffixes.
|
||||
//
|
||||
// Differently from BuildCompliantMetricName, it doesn't check for the presence of unit and type suffixes.
|
||||
// If "addMetricSuffixes" is true, it will add them anyway.
|
||||
//
|
||||
// Please use BuildCompliantMetricName for a metric name that follows Prometheus naming conventions.
|
||||
func BuildMetricName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string {
|
||||
metricName := metric.Name()
|
||||
|
||||
if namespace != "" {
|
||||
metricName = namespace + "_" + metricName
|
||||
}
|
||||
|
||||
if addMetricSuffixes {
|
||||
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(metric.Unit())
|
||||
if mainUnitSuffix != "" {
|
||||
metricName = metricName + "_" + mainUnitSuffix
|
||||
}
|
||||
if perUnitSuffix != "" {
|
||||
metricName = metricName + "_" + perUnitSuffix
|
||||
}
|
||||
|
||||
// Append _total for Counters
|
||||
if metric.Type() == pmetric.MetricTypeSum && metric.Sum().IsMonotonic() {
|
||||
metricName = metricName + "_total"
|
||||
}
|
||||
|
||||
// Append _ratio for metrics with unit "1"
|
||||
// Some OTel receivers improperly use unit "1" for counters of objects
|
||||
// See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions
|
||||
// Until these issues have been fixed, we're appending `_ratio` for gauges ONLY
|
||||
// Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons)
|
||||
if metric.Unit() == "1" && metric.Type() == pmetric.MetricTypeGauge {
|
||||
metricName = metricName + "_ratio"
|
||||
}
|
||||
}
|
||||
return metricName
|
||||
}
|
||||
|
||||
// buildUnitSuffixes builds the main and per unit suffixes for the specified unit
|
||||
// but doesn't do any special character transformation to accommodate Prometheus naming conventions.
|
||||
// Removing trailing underscores or appending suffixes is done in the caller.
|
||||
func buildUnitSuffixes(unit string) (mainUnitSuffix, perUnitSuffix string) {
|
||||
// Split unit at the '/' if any
|
||||
unitTokens := strings.SplitN(unit, "/", 2)
|
||||
|
||||
if len(unitTokens) > 0 {
|
||||
// Main unit
|
||||
// Update if not blank and doesn't contain '{}'
|
||||
mainUnitOTel := strings.TrimSpace(unitTokens[0])
|
||||
if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") {
|
||||
mainUnitSuffix = unitMapGetOrDefault(mainUnitOTel)
|
||||
}
|
||||
|
||||
// Per unit
|
||||
// Update if not blank and doesn't contain '{}'
|
||||
if len(unitTokens) > 1 && unitTokens[1] != "" {
|
||||
perUnitOTel := strings.TrimSpace(unitTokens[1])
|
||||
if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") {
|
||||
perUnitSuffix = perUnitMapGetOrDefault(perUnitOTel)
|
||||
}
|
||||
if perUnitSuffix != "" {
|
||||
perUnitSuffix = "per_" + perUnitSuffix
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return mainUnitSuffix, perUnitSuffix
|
||||
}
|
|
@ -0,0 +1,257 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestByte(t *testing.T) {
|
||||
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), ""))
|
||||
}
|
||||
|
||||
func TestByteCounter(t *testing.T) {
|
||||
require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), ""))
|
||||
require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), ""))
|
||||
}
|
||||
|
||||
func TestWhiteSpaces(t *testing.T) {
|
||||
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), ""))
|
||||
}
|
||||
|
||||
func TestNonStandardUnit(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), ""))
|
||||
// The normal metric name character set is allowed in non-standard units.
|
||||
require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), ""))
|
||||
}
|
||||
|
||||
func TestNonStandardUnitCounter(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), ""))
|
||||
}
|
||||
|
||||
func TestBrokenUnit(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), ""))
|
||||
require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), ""))
|
||||
require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), ""))
|
||||
}
|
||||
|
||||
func TestBrokenUnitCounter(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), ""))
|
||||
require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), ""))
|
||||
require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), ""))
|
||||
}
|
||||
|
||||
func TestRatio(t *testing.T) {
|
||||
require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), ""))
|
||||
require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), ""))
|
||||
require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), ""))
|
||||
}
|
||||
|
||||
func TestHertz(t *testing.T) {
|
||||
require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), ""))
|
||||
}
|
||||
|
||||
func TestPer(t *testing.T) {
|
||||
require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), ""))
|
||||
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), ""))
|
||||
// The normal metric name character set is allowed in non-standard units.
|
||||
require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), ""))
|
||||
|
||||
t.Run("invalid per unit", func(t *testing.T) {
|
||||
require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), ""))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPercent(t *testing.T) {
|
||||
require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), ""))
|
||||
require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), ""))
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), ""))
|
||||
require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), ""))
|
||||
}
|
||||
|
||||
func TestOTelReceivers(t *testing.T) {
|
||||
require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), ""))
|
||||
require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), ""))
|
||||
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), ""))
|
||||
require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), ""))
|
||||
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), ""))
|
||||
require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), ""))
|
||||
require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), ""))
|
||||
require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), ""))
|
||||
require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), ""))
|
||||
require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), ""))
|
||||
require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), ""))
|
||||
require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), ""))
|
||||
require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), ""))
|
||||
require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), ""))
|
||||
require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), ""))
|
||||
require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), ""))
|
||||
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), ""))
|
||||
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), ""))
|
||||
require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), ""))
|
||||
require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), ""))
|
||||
require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), ""))
|
||||
require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), ""))
|
||||
}
|
||||
|
||||
func TestNamespace(t *testing.T) {
|
||||
require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space"))
|
||||
require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space"))
|
||||
}
|
||||
|
||||
func TestCleanUpUnit(t *testing.T) {
|
||||
require.Equal(t, "", cleanUpUnit(""))
|
||||
require.Equal(t, "a_b", cleanUpUnit("a b"))
|
||||
require.Equal(t, "hello_world", cleanUpUnit("hello, world"))
|
||||
require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2"))
|
||||
require.Equal(t, "1000", cleanUpUnit("$1000"))
|
||||
require.Equal(t, "", cleanUpUnit("*+$^=)"))
|
||||
}
|
||||
|
||||
func TestUnitMapGetOrDefault(t *testing.T) {
|
||||
require.Equal(t, "", unitMapGetOrDefault(""))
|
||||
require.Equal(t, "seconds", unitMapGetOrDefault("s"))
|
||||
require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
|
||||
}
|
||||
|
||||
func TestPerUnitMapGetOrDefault(t *testing.T) {
|
||||
require.Equal(t, "", perUnitMapGetOrDefault(""))
|
||||
require.Equal(t, "second", perUnitMapGetOrDefault("s"))
|
||||
require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
|
||||
}
|
||||
|
||||
func TestBuildUnitSuffixes(t *testing.T) {
|
||||
tests := []struct {
|
||||
unit string
|
||||
expectedMain string
|
||||
expectedPer string
|
||||
}{
|
||||
{"", "", ""},
|
||||
{"s", "seconds", ""},
|
||||
{"By/s", "bytes", "per_second"},
|
||||
{"requests/m", "requests", "per_minute"},
|
||||
{"{invalid}/second", "", "per_second"},
|
||||
{"bytes/{invalid}", "bytes", ""},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
mainUnitSuffix, perUnitSuffix := buildUnitSuffixes(test.unit)
|
||||
require.Equal(t, test.expectedMain, mainUnitSuffix)
|
||||
require.Equal(t, test.expectedPer, perUnitSuffix)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddUnitTokens(t *testing.T) {
|
||||
tests := []struct {
|
||||
nameTokens []string
|
||||
mainUnitSuffix string
|
||||
perUnitSuffix string
|
||||
expected []string
|
||||
}{
|
||||
{[]string{}, "", "", []string{}},
|
||||
{[]string{"token1"}, "main", "", []string{"token1", "main"}},
|
||||
{[]string{"token1"}, "", "per", []string{"token1", "per"}},
|
||||
{[]string{"token1"}, "main", "per", []string{"token1", "main", "per"}},
|
||||
{[]string{"token1", "per"}, "main", "per", []string{"token1", "per", "main"}},
|
||||
{[]string{"token1", "main"}, "main", "per", []string{"token1", "main", "per"}},
|
||||
{[]string{"token1"}, "main_", "per", []string{"token1", "main", "per"}},
|
||||
{[]string{"token1"}, "main_unit", "per_seconds_", []string{"token1", "main_unit", "per_seconds"}}, // trailing underscores are removed
|
||||
{[]string{"token1"}, "main_unit", "per_", []string{"token1", "main_unit"}}, // 'per_' is removed entirely
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
result := addUnitTokens(test.nameTokens, test.mainUnitSuffix, test.perUnitSuffix)
|
||||
require.Equal(t, test.expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveItem(t *testing.T) {
|
||||
require.Equal(t, []string{}, removeItem([]string{}, "test"))
|
||||
require.Equal(t, []string{}, removeItem([]string{}, ""))
|
||||
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
|
||||
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
|
||||
require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
|
||||
require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
|
||||
require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
|
||||
}
|
||||
|
||||
func TestBuildCompliantMetricNameWithSuffixes(t *testing.T) {
|
||||
require.Equal(t, "system_io_bytes_total", BuildCompliantMetricName(createCounter("system.io", "By"), "", true))
|
||||
require.Equal(t, "system_network_io_bytes_total", BuildCompliantMetricName(createCounter("network.io", "By"), "system", true))
|
||||
require.Equal(t, "_3_14_digits", BuildCompliantMetricName(createGauge("3.14 digits", ""), "", true))
|
||||
require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true))
|
||||
require.Equal(t, ":foo::bar", BuildCompliantMetricName(createGauge(":foo::bar", ""), "", true))
|
||||
require.Equal(t, ":foo::bar_total", BuildCompliantMetricName(createCounter(":foo::bar", ""), "", true))
|
||||
// Gauges with unit 1 are considered ratios.
|
||||
require.Equal(t, "foo_bar_ratio", BuildCompliantMetricName(createGauge("foo.bar", "1"), "", true))
|
||||
// Slashes in units are converted.
|
||||
require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantMetricName(createCounter("system.io", "foo/bar"), "", true))
|
||||
require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", true))
|
||||
// Removes non aplhanumerical characters from units, but leaves colons.
|
||||
require.Equal(t, "temperature_:C", BuildCompliantMetricName(createGauge("temperature", "%*()°:C"), "", true))
|
||||
}
|
||||
|
||||
func TestBuildCompliantMetricNameWithoutSuffixes(t *testing.T) {
|
||||
require.Equal(t, "system_io", BuildCompliantMetricName(createCounter("system.io", "By"), "", false))
|
||||
require.Equal(t, "system_network_io", BuildCompliantMetricName(createCounter("network.io", "By"), "system", false))
|
||||
require.Equal(t, "system_network_I_O", BuildCompliantMetricName(createCounter("network (I/O)", "By"), "system", false))
|
||||
require.Equal(t, "_3_14_digits", BuildCompliantMetricName(createGauge("3.14 digits", "By"), "", false))
|
||||
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false))
|
||||
require.Equal(t, ":foo::bar", BuildCompliantMetricName(createGauge(":foo::bar", ""), "", false))
|
||||
require.Equal(t, ":foo::bar", BuildCompliantMetricName(createCounter(":foo::bar", ""), "", false))
|
||||
require.Equal(t, "foo_bar", BuildCompliantMetricName(createGauge("foo.bar", "1"), "", false))
|
||||
require.Equal(t, "system_io", BuildCompliantMetricName(createCounter("system.io", "foo/bar"), "", false))
|
||||
require.Equal(t, "metric_with___foreign_characters", BuildCompliantMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", false))
|
||||
}
|
||||
|
||||
func TestBuildMetricNameWithSuffixes(t *testing.T) {
|
||||
require.Equal(t, "system.io_bytes_total", BuildMetricName(createCounter("system.io", "By"), "", true))
|
||||
require.Equal(t, "system_network.io_bytes_total", BuildMetricName(createCounter("network.io", "By"), "system", true))
|
||||
require.Equal(t, "3.14 digits", BuildMetricName(createGauge("3.14 digits", ""), "", true))
|
||||
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true))
|
||||
require.Equal(t, ":foo::bar", BuildMetricName(createGauge(":foo::bar", ""), "", true))
|
||||
require.Equal(t, ":foo::bar_total", BuildMetricName(createCounter(":foo::bar", ""), "", true))
|
||||
// Gauges with unit 1 are considered ratios.
|
||||
require.Equal(t, "foo.bar_ratio", BuildMetricName(createGauge("foo.bar", "1"), "", true))
|
||||
// Slashes in units are converted.
|
||||
require.Equal(t, "system.io_foo_per_bar_total", BuildMetricName(createCounter("system.io", "foo/bar"), "", true))
|
||||
require.Equal(t, "metric_with_字符_foreign_characters_total", BuildMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", true))
|
||||
require.Equal(t, "temperature_%*()°C", BuildMetricName(createGauge("temperature", "%*()°C"), "", true)) // Keeps the all characters in unit
|
||||
// Tests below show weird interactions that users can have with the metric names.
|
||||
// With BuildMetricName we don't check if units/type suffixes are already present in the metric name, we always add them.
|
||||
require.Equal(t, "system_io_seconds_seconds", BuildMetricName(createGauge("system_io_seconds", "s"), "", true))
|
||||
require.Equal(t, "system_io_total_total", BuildMetricName(createCounter("system_io_total", ""), "", true))
|
||||
}
|
||||
|
||||
func TestBuildMetricNameWithoutSuffixes(t *testing.T) {
|
||||
require.Equal(t, "system.io", BuildMetricName(createCounter("system.io", "By"), "", false))
|
||||
require.Equal(t, "system_network.io", BuildMetricName(createCounter("network.io", "By"), "system", false))
|
||||
require.Equal(t, "3.14 digits", BuildMetricName(createGauge("3.14 digits", ""), "", false))
|
||||
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildMetricName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false))
|
||||
require.Equal(t, ":foo::bar", BuildMetricName(createGauge(":foo::bar", ""), "", false))
|
||||
require.Equal(t, ":foo::bar", BuildMetricName(createCounter(":foo::bar", ""), "", false))
|
||||
// Gauges with unit 1 are considered ratios.
|
||||
require.Equal(t, "foo.bar", BuildMetricName(createGauge("foo.bar", "1"), "", false))
|
||||
require.Equal(t, "metric_with_字符_foreign_characters", BuildMetricName(createCounter("metric_with_字符_foreign_characters", ""), "", false))
|
||||
require.Equal(t, "system_io_seconds", BuildMetricName(createGauge("system_io_seconds", "s"), "", false))
|
||||
require.Equal(t, "system_io_total", BuildMetricName(createCounter("system_io_total", ""), "", false))
|
||||
}
|
|
@ -1,210 +0,0 @@
|
|||
// Copyright 2024 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go
|
||||
// Provenance-includes-license: Apache-2.0
|
||||
// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestByte(t *testing.T) {
|
||||
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "", false))
|
||||
}
|
||||
|
||||
func TestByteCounter(t *testing.T) {
|
||||
require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "", false))
|
||||
require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "", false))
|
||||
}
|
||||
|
||||
func TestWhiteSpaces(t *testing.T) {
|
||||
require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "", false))
|
||||
}
|
||||
|
||||
func TestNonStandardUnit(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "", false))
|
||||
// The normal metric name character set is allowed in non-standard units.
|
||||
require.Equal(t, "system_network_dropped_nonstandard:_1", normalizeName(createGauge("system.network.dropped", "nonstandard:_1"), "", false))
|
||||
}
|
||||
|
||||
func TestNonStandardUnitCounter(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "", false))
|
||||
}
|
||||
|
||||
func TestBrokenUnit(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "", false))
|
||||
require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "", false))
|
||||
require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "", false))
|
||||
}
|
||||
|
||||
func TestBrokenUnitCounter(t *testing.T) {
|
||||
require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "", false))
|
||||
require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "", false))
|
||||
require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "", false))
|
||||
}
|
||||
|
||||
func TestRatio(t *testing.T) {
|
||||
require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "", false))
|
||||
require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "", false))
|
||||
require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "", false))
|
||||
}
|
||||
|
||||
func TestHertz(t *testing.T) {
|
||||
require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "", false))
|
||||
}
|
||||
|
||||
func TestPer(t *testing.T) {
|
||||
require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "", false))
|
||||
require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "", false))
|
||||
// The normal metric name character set is allowed in non-standard units.
|
||||
require.Equal(t, "system_network_dropped_non_per_standard:_1", normalizeName(createGauge("system.network.dropped", "non/standard:_1"), "", false))
|
||||
|
||||
t.Run("invalid per unit", func(t *testing.T) {
|
||||
require.Equal(t, "broken_metric_speed_km", normalizeName(createGauge("broken.metric.speed", "km/°"), "", false))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPercent(t *testing.T) {
|
||||
require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "", false))
|
||||
require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "", false))
|
||||
}
|
||||
|
||||
func TestEmpty(t *testing.T) {
|
||||
require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "", false))
|
||||
require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "", false))
|
||||
}
|
||||
|
||||
func TestAllowUTF8(t *testing.T) {
|
||||
t.Run("allow UTF8", func(t *testing.T) {
|
||||
require.Equal(t, "unsupported.metric.temperature_°F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", true))
|
||||
require.Equal(t, "unsupported.metric.weird_+=.:,!* & #", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "", true))
|
||||
require.Equal(t, "unsupported.metric.redundant___test $_per_°C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", true))
|
||||
require.Equal(t, "metric_with_字符_foreign_characters_ど", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", true))
|
||||
})
|
||||
t.Run("disallow UTF8", func(t *testing.T) {
|
||||
require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "", false))
|
||||
require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.,!* & #"), "", false))
|
||||
require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "", false))
|
||||
require.Equal(t, "metric_with_foreign_characters", normalizeName(createGauge("metric_with_字符_foreign_characters", "ど"), "", false))
|
||||
})
|
||||
}
|
||||
|
||||
func TestAllowUTF8KnownBugs(t *testing.T) {
|
||||
// Due to historical reasons, the translator code was copied from OpenTelemetry collector codebase.
|
||||
// Over there, they tried to provide means to translate metric names following Prometheus conventions that are documented here:
|
||||
// https://prometheus.io/docs/practices/naming/
|
||||
//
|
||||
// Althogh not explicitly said, it was implied that words should be separated by a single underscore and the codebase was written
|
||||
// with that in mind.
|
||||
//
|
||||
// Now that we're allowing OTel users to have their original names stored in prometheus without any transformation, we're facing problems
|
||||
// where two (or more) UTF-8 characters are being used to separate words.
|
||||
// TODO(arthursens): Fix it!
|
||||
|
||||
// We're asserting on 'NotEqual', which proves the bug.
|
||||
require.NotEqual(t, "metric....split_=+by_//utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true))
|
||||
// Here we're asserting on 'Equal', showing the current behavior.
|
||||
require.Equal(t, "metric.split_by_utf8characters", normalizeName(createGauge("metric....split_=+by_//utf8characters", ""), "", true))
|
||||
}
|
||||
|
||||
func TestOTelReceivers(t *testing.T) {
|
||||
require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "", false))
|
||||
require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "", false))
|
||||
require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "", false))
|
||||
require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "", false))
|
||||
require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "", false))
|
||||
require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "", false))
|
||||
require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "", false))
|
||||
require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "", false))
|
||||
require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "", false))
|
||||
require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "", false))
|
||||
require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "", false))
|
||||
require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "", false))
|
||||
require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "", false))
|
||||
require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "", false))
|
||||
require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "", false))
|
||||
require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "", false))
|
||||
require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "", false))
|
||||
require.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "", false))
|
||||
require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "", false))
|
||||
require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "", false))
|
||||
require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "", false))
|
||||
require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "", false))
|
||||
}
|
||||
|
||||
func TestNamespace(t *testing.T) {
|
||||
require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space", false))
|
||||
require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space", false))
|
||||
}
|
||||
|
||||
func TestCleanUpUnit(t *testing.T) {
|
||||
require.Equal(t, "", cleanUpUnit(""))
|
||||
require.Equal(t, "a_b", cleanUpUnit("a b"))
|
||||
require.Equal(t, "hello_world", cleanUpUnit("hello, world"))
|
||||
require.Equal(t, "hello_you_2", cleanUpUnit("hello you 2"))
|
||||
require.Equal(t, "1000", cleanUpUnit("$1000"))
|
||||
require.Equal(t, "", cleanUpUnit("*+$^=)"))
|
||||
}
|
||||
|
||||
func TestUnitMapGetOrDefault(t *testing.T) {
|
||||
require.Equal(t, "", unitMapGetOrDefault(""))
|
||||
require.Equal(t, "seconds", unitMapGetOrDefault("s"))
|
||||
require.Equal(t, "invalid", unitMapGetOrDefault("invalid"))
|
||||
}
|
||||
|
||||
func TestPerUnitMapGetOrDefault(t *testing.T) {
|
||||
require.Equal(t, "", perUnitMapGetOrDefault(""))
|
||||
require.Equal(t, "second", perUnitMapGetOrDefault("s"))
|
||||
require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid"))
|
||||
}
|
||||
|
||||
func TestRemoveItem(t *testing.T) {
|
||||
require.Equal(t, []string{}, removeItem([]string{}, "test"))
|
||||
require.Equal(t, []string{}, removeItem([]string{}, ""))
|
||||
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d"))
|
||||
require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, ""))
|
||||
require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c"))
|
||||
require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b"))
|
||||
require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", "c"}, "a"))
|
||||
}
|
||||
|
||||
func TestBuildCompliantNameWithSuffixes(t *testing.T) {
|
||||
require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true, false))
|
||||
require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true, false))
|
||||
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true, false))
|
||||
require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true, false))
|
||||
require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true, false))
|
||||
require.Equal(t, ":foo::bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true, false))
|
||||
// Gauges with unit 1 are considered ratios.
|
||||
require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true, false))
|
||||
// Slashes in units are converted.
|
||||
require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true, false))
|
||||
require.Equal(t, "metric_with_foreign_characters_total", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", true, false))
|
||||
}
|
||||
|
||||
func TestBuildCompliantNameWithoutSuffixes(t *testing.T) {
|
||||
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false, false))
|
||||
require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false, false))
|
||||
require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false, false))
|
||||
require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false, false))
|
||||
require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false, false))
|
||||
require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false, false))
|
||||
require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false, false))
|
||||
require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false, false))
|
||||
require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false, false))
|
||||
require.Equal(t, "metric_with___foreign_characters", BuildCompliantName(createCounter("metric_with_字符_foreign_characters", ""), "", false, false))
|
||||
}
|
|
@ -762,7 +762,7 @@ func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) {
|
|||
Settings{
|
||||
ExportCreatedMetric: true,
|
||||
},
|
||||
prometheustranslator.BuildCompliantName(metric, "", true, true),
|
||||
prometheustranslator.BuildCompliantMetricName(metric, "", true),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, annots)
|
||||
|
|
|
@ -96,7 +96,12 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
|
|||
continue
|
||||
}
|
||||
|
||||
promName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes, settings.AllowUTF8)
|
||||
var promName string
|
||||
if settings.AllowUTF8 {
|
||||
promName = prometheustranslator.BuildMetricName(metric, settings.Namespace, settings.AddMetricSuffixes)
|
||||
} else {
|
||||
promName = prometheustranslator.BuildCompliantMetricName(metric, settings.Namespace, settings.AddMetricSuffixes)
|
||||
}
|
||||
c.metadata = append(c.metadata, prompb.MetricMetadata{
|
||||
Type: otelMetricTypeToPromMetricType(metric),
|
||||
MetricFamilyName: promName,
|
||||
|
|
|
@ -46,7 +46,7 @@ func TestFromMetrics(t *testing.T) {
|
|||
metricSlice := scopeMetricsSlice.At(j).Metrics()
|
||||
for k := 0; k < metricSlice.Len(); k++ {
|
||||
metric := metricSlice.At(k)
|
||||
promName := prometheustranslator.BuildCompliantName(metric, "", false, false)
|
||||
promName := prometheustranslator.BuildCompliantMetricName(metric, "", false)
|
||||
expMetadata = append(expMetadata, prompb.MetricMetadata{
|
||||
Type: otelMetricTypeToPromMetricType(metric),
|
||||
MetricFamilyName: promName,
|
||||
|
|
|
@ -550,7 +550,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr
|
|||
mm := make([]prompb.MetricMetadata, 0, len(metadata))
|
||||
for _, entry := range metadata {
|
||||
mm = append(mm, prompb.MetricMetadata{
|
||||
MetricFamilyName: entry.Metric,
|
||||
MetricFamilyName: entry.MetricFamily,
|
||||
Help: entry.Help,
|
||||
Type: prompb.FromMetadataType(entry.Type),
|
||||
Unit: entry.Unit,
|
||||
|
@ -1919,12 +1919,17 @@ func populateV2TimeSeries(symbolTable *writev2.SymbolsTable, batch []timeSeries,
|
|||
var nPendingSamples, nPendingExemplars, nPendingHistograms, nPendingMetadata int
|
||||
for nPending, d := range batch {
|
||||
pendingData[nPending].Samples = pendingData[nPending].Samples[:0]
|
||||
// todo: should we also safeguard against empty metadata here?
|
||||
if d.metadata != nil {
|
||||
pendingData[nPending].Metadata.Type = writev2.FromMetadataType(d.metadata.Type)
|
||||
pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Help)
|
||||
pendingData[nPending].Metadata.HelpRef = symbolTable.Symbolize(d.metadata.Unit)
|
||||
pendingData[nPending].Metadata.UnitRef = symbolTable.Symbolize(d.metadata.Unit)
|
||||
nPendingMetadata++
|
||||
} else {
|
||||
// Safeguard against sending garbage in case of not having metadata
|
||||
// for whatever reason.
|
||||
pendingData[nPending].Metadata.Type = writev2.Metadata_METRIC_TYPE_UNSPECIFIED
|
||||
pendingData[nPending].Metadata.HelpRef = 0
|
||||
pendingData[nPending].Metadata.UnitRef = 0
|
||||
}
|
||||
|
||||
if sendExemplars {
|
||||
|
@ -2119,7 +2124,7 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed []
|
|||
}
|
||||
return compressed, nil
|
||||
default:
|
||||
return compressed, fmt.Errorf("Unknown compression scheme [%v]", enc)
|
||||
return compressed, fmt.Errorf("unknown compression scheme [%v]", enc)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -342,10 +342,10 @@ func TestMetadataDelivery(t *testing.T) {
|
|||
numMetadata := 1532
|
||||
for i := 0; i < numMetadata; i++ {
|
||||
metadata = append(metadata, scrape.MetricMetadata{
|
||||
Metric: "prometheus_remote_storage_sent_metadata_bytes_total_" + strconv.Itoa(i),
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "a nice help text",
|
||||
Unit: "",
|
||||
MetricFamily: "prometheus_remote_storage_sent_metadata_bytes_" + strconv.Itoa(i),
|
||||
Type: model.MetricTypeCounter,
|
||||
Help: "a nice help text",
|
||||
Unit: "",
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -357,7 +357,7 @@ func TestMetadataDelivery(t *testing.T) {
|
|||
// fit into MaxSamplesPerSend.
|
||||
require.Equal(t, numMetadata/config.DefaultMetadataConfig.MaxSamplesPerSend+1, c.writesReceived)
|
||||
// Make sure the last samples were sent.
|
||||
require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].Metric][0].MetricFamilyName, metadata[len(metadata)-1].Metric)
|
||||
require.Equal(t, c.receivedMetadata[metadata[len(metadata)-1].MetricFamily][0].MetricFamilyName, metadata[len(metadata)-1].MetricFamily)
|
||||
}
|
||||
|
||||
func TestWALMetadataDelivery(t *testing.T) {
|
||||
|
|
|
@ -56,10 +56,10 @@ func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable stor
|
|||
marshalPool: &sync.Pool{},
|
||||
|
||||
queries: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Namespace: "prometheus",
|
||||
Subsystem: "api", // TODO: changes to storage in Prometheus 3.0.
|
||||
Name: "remote_read_queries",
|
||||
Help: "The current number of remote read queries being executed or waiting.",
|
||||
Namespace: namespace,
|
||||
Subsystem: "remote_read_handler",
|
||||
Name: "queries",
|
||||
Help: "The current number of remote read queries that are either in execution or queued on the handler.",
|
||||
}),
|
||||
}
|
||||
if r != nil {
|
||||
|
|
|
@ -38,6 +38,13 @@ import (
|
|||
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
otlptranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite"
|
||||
|
||||
deltatocumulative "github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor"
|
||||
"go.opentelemetry.io/collector/component"
|
||||
"go.opentelemetry.io/collector/consumer"
|
||||
"go.opentelemetry.io/collector/pdata/pmetric"
|
||||
"go.opentelemetry.io/collector/processor"
|
||||
"go.opentelemetry.io/otel/metric/noop"
|
||||
)
|
||||
|
||||
type writeHandler struct {
|
||||
|
@ -517,56 +524,107 @@ func (h *writeHandler) handleHistogramZeroSample(app storage.Appender, ref stora
|
|||
return ref, err
|
||||
}
|
||||
|
||||
type OTLPOptions struct {
|
||||
// Convert delta samples to their cumulative equivalent by aggregating in-memory
|
||||
ConvertDelta bool
|
||||
}
|
||||
|
||||
// NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and
|
||||
// writes them to the provided appendable.
|
||||
func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config) http.Handler {
|
||||
rwHandler := &writeHandler{
|
||||
logger: logger,
|
||||
appendable: appendable,
|
||||
func NewOTLPWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, configFunc func() config.Config, opts OTLPOptions) http.Handler {
|
||||
ex := &rwExporter{
|
||||
writeHandler: &writeHandler{
|
||||
logger: logger,
|
||||
appendable: appendable,
|
||||
},
|
||||
config: configFunc,
|
||||
}
|
||||
|
||||
return &otlpWriteHandler{
|
||||
logger: logger,
|
||||
rwHandler: rwHandler,
|
||||
configFunc: configFunc,
|
||||
wh := &otlpWriteHandler{logger: logger, cumul: ex}
|
||||
|
||||
if opts.ConvertDelta {
|
||||
fac := deltatocumulative.NewFactory()
|
||||
set := processor.Settings{TelemetrySettings: component.TelemetrySettings{MeterProvider: noop.NewMeterProvider()}}
|
||||
d2c, err := fac.CreateMetrics(context.Background(), set, fac.CreateDefaultConfig(), wh.cumul)
|
||||
if err != nil {
|
||||
// fac.CreateMetrics directly calls [deltatocumulativeprocessor.createMetricsProcessor],
|
||||
// which only errors if:
|
||||
// - cfg.(type) != *Config
|
||||
// - telemetry.New fails due to bad set.TelemetrySettings
|
||||
//
|
||||
// both cannot be the case, as we pass a valid *Config and valid TelemetrySettings.
|
||||
// as such, we assume this error to never occur.
|
||||
// if it is, our assumptions are broken in which case a panic seems acceptable.
|
||||
panic(err)
|
||||
}
|
||||
if err := d2c.Start(context.Background(), nil); err != nil {
|
||||
// deltatocumulative does not error on start. see above for panic reasoning
|
||||
panic(err)
|
||||
}
|
||||
wh.delta = d2c
|
||||
}
|
||||
|
||||
return wh
|
||||
}
|
||||
|
||||
type otlpWriteHandler struct {
|
||||
logger *slog.Logger
|
||||
rwHandler *writeHandler
|
||||
configFunc func() config.Config
|
||||
type rwExporter struct {
|
||||
*writeHandler
|
||||
config func() config.Config
|
||||
}
|
||||
|
||||
func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
req, err := DecodeOTLPWriteRequest(r)
|
||||
if err != nil {
|
||||
h.logger.Error("Error decoding remote write request", "err", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
otlpCfg := h.configFunc().OTLPConfig
|
||||
func (rw *rwExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error {
|
||||
otlpCfg := rw.config().OTLPConfig
|
||||
|
||||
converter := otlptranslator.NewPrometheusConverter()
|
||||
annots, err := converter.FromMetrics(r.Context(), req.Metrics(), otlptranslator.Settings{
|
||||
annots, err := converter.FromMetrics(ctx, md, otlptranslator.Settings{
|
||||
AddMetricSuffixes: true,
|
||||
AllowUTF8: otlpCfg.TranslationStrategy == config.NoUTF8EscapingWithSuffixes,
|
||||
PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes,
|
||||
KeepIdentifyingResourceAttributes: otlpCfg.KeepIdentifyingResourceAttributes,
|
||||
})
|
||||
if err != nil {
|
||||
h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err)
|
||||
rw.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err)
|
||||
}
|
||||
ws, _ := annots.AsStrings("", 0, 0)
|
||||
if len(ws) > 0 {
|
||||
h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
|
||||
rw.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws)
|
||||
}
|
||||
|
||||
err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{
|
||||
err = rw.write(ctx, &prompb.WriteRequest{
|
||||
Timeseries: converter.TimeSeries(),
|
||||
Metadata: converter.Metadata(),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (rw *rwExporter) Capabilities() consumer.Capabilities {
|
||||
return consumer.Capabilities{MutatesData: false}
|
||||
}
|
||||
|
||||
type otlpWriteHandler struct {
|
||||
logger *slog.Logger
|
||||
|
||||
cumul consumer.Metrics // only cumulative
|
||||
delta consumer.Metrics // delta capable
|
||||
}
|
||||
|
||||
func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
req, err := DecodeOTLPWriteRequest(r)
|
||||
if err != nil {
|
||||
h.logger.Error("Error decoding OTLP write request", "err", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
md := req.Metrics()
|
||||
// if delta conversion enabled AND delta samples exist, use slower delta capable path
|
||||
if h.delta != nil && hasDelta(md) {
|
||||
err = h.delta.ConsumeMetrics(r.Context(), md)
|
||||
} else {
|
||||
// deltatocumulative currently holds a sync.Mutex when entering ConsumeMetrics.
|
||||
// This is slow and not necessary when no delta samples exist anyways
|
||||
err = h.cumul.ConsumeMetrics(r.Context(), md)
|
||||
}
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
|
@ -583,6 +641,31 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func hasDelta(md pmetric.Metrics) bool {
|
||||
for i := range md.ResourceMetrics().Len() {
|
||||
sms := md.ResourceMetrics().At(i).ScopeMetrics()
|
||||
for i := range sms.Len() {
|
||||
ms := sms.At(i).Metrics()
|
||||
for i := range ms.Len() {
|
||||
temporality := pmetric.AggregationTemporalityUnspecified
|
||||
m := ms.At(i)
|
||||
switch ms.At(i).Type() {
|
||||
case pmetric.MetricTypeSum:
|
||||
temporality = m.Sum().AggregationTemporality()
|
||||
case pmetric.MetricTypeExponentialHistogram:
|
||||
temporality = m.ExponentialHistogram().AggregationTemporality()
|
||||
case pmetric.MetricTypeHistogram:
|
||||
temporality = m.Histogram().AggregationTemporality()
|
||||
}
|
||||
if temporality == pmetric.AggregationTemporalityDelta {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type timeLimitAppender struct {
|
||||
storage.Appender
|
||||
|
||||
|
|
|
@ -15,13 +15,23 @@ package remote
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math/rand/v2"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
common_config "github.com/prometheus/common/config"
|
||||
"github.com/prometheus/common/model"
|
||||
|
@ -31,8 +41,10 @@ import (
|
|||
"go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/model/relabel"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
)
|
||||
|
||||
func testRemoteWriteConfig() *config.RemoteWriteConfig {
|
||||
|
@ -379,11 +391,11 @@ func TestOTLPWriteHandler(t *testing.T) {
|
|||
req.Header.Set("Content-Type", "application/x-protobuf")
|
||||
|
||||
appendable := &mockAppendable{}
|
||||
handler := NewOTLPWriteHandler(nil, appendable, func() config.Config {
|
||||
handler := NewOTLPWriteHandler(nil, nil, appendable, func() config.Config {
|
||||
return config.Config{
|
||||
OTLPConfig: config.DefaultOTLPConfig,
|
||||
}
|
||||
})
|
||||
}, OTLPOptions{})
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
handler.ServeHTTP(recorder, req)
|
||||
|
@ -476,3 +488,364 @@ func generateOTLPWriteRequest() pmetricotlp.ExportRequest {
|
|||
|
||||
return pmetricotlp.NewExportRequestFromMetrics(d)
|
||||
}
|
||||
|
||||
func TestOTLPDelta(t *testing.T) {
|
||||
log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
|
||||
appendable := &mockAppendable{}
|
||||
cfg := func() config.Config {
|
||||
return config.Config{OTLPConfig: config.DefaultOTLPConfig}
|
||||
}
|
||||
handler := NewOTLPWriteHandler(log, nil, appendable, cfg, OTLPOptions{ConvertDelta: true})
|
||||
|
||||
md := pmetric.NewMetrics()
|
||||
ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
|
||||
|
||||
m := ms.AppendEmpty()
|
||||
m.SetName("some.delta.total")
|
||||
|
||||
sum := m.SetEmptySum()
|
||||
sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
|
||||
|
||||
ts := time.Date(2000, 1, 2, 3, 4, 0, 0, time.UTC)
|
||||
for i := range 3 {
|
||||
dp := sum.DataPoints().AppendEmpty()
|
||||
dp.SetIntValue(int64(i))
|
||||
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts.Add(time.Duration(i) * time.Second)))
|
||||
}
|
||||
|
||||
proto, err := pmetricotlp.NewExportRequestFromMetrics(md).MarshalProto()
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(proto))
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("Content-Type", "application/x-protobuf")
|
||||
|
||||
rec := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rec, req)
|
||||
require.Equal(t, http.StatusOK, rec.Result().StatusCode)
|
||||
|
||||
ls := labels.FromStrings("__name__", "some_delta_total")
|
||||
milli := func(sec int) int64 {
|
||||
return time.Date(2000, 1, 2, 3, 4, sec, 0, time.UTC).UnixMilli()
|
||||
}
|
||||
|
||||
want := []mockSample{
|
||||
{t: milli(0), l: ls, v: 0}, // +0
|
||||
{t: milli(1), l: ls, v: 1}, // +1
|
||||
{t: milli(2), l: ls, v: 3}, // +2
|
||||
}
|
||||
if diff := cmp.Diff(want, appendable.samples, cmp.Exporter(func(_ reflect.Type) bool { return true })); diff != "" {
|
||||
t.Fatal(diff)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkOTLP(b *testing.B) {
|
||||
start := time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
|
||||
|
||||
type Type struct {
|
||||
name string
|
||||
data func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric
|
||||
}
|
||||
types := []Type{{
|
||||
name: "sum",
|
||||
data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
|
||||
cumul := make(map[int]float64)
|
||||
return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
|
||||
m := pmetric.NewMetric()
|
||||
sum := m.SetEmptySum()
|
||||
sum.SetAggregationTemporality(mode)
|
||||
dps := sum.DataPoints()
|
||||
for id := range dpc {
|
||||
dp := dps.AppendEmpty()
|
||||
dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
|
||||
dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
|
||||
dp.Attributes().PutStr("id", strconv.Itoa(id))
|
||||
v := float64(rand.IntN(100)) / 10
|
||||
switch mode {
|
||||
case pmetric.AggregationTemporalityDelta:
|
||||
dp.SetDoubleValue(v)
|
||||
case pmetric.AggregationTemporalityCumulative:
|
||||
cumul[id] += v
|
||||
dp.SetDoubleValue(cumul[id])
|
||||
}
|
||||
}
|
||||
return []pmetric.Metric{m}
|
||||
}
|
||||
}(),
|
||||
}, {
|
||||
name: "histogram",
|
||||
data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
|
||||
bounds := [4]float64{1, 10, 100, 1000}
|
||||
type state struct {
|
||||
counts [4]uint64
|
||||
count uint64
|
||||
sum float64
|
||||
}
|
||||
var cumul []state
|
||||
return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
|
||||
if cumul == nil {
|
||||
cumul = make([]state, dpc)
|
||||
}
|
||||
m := pmetric.NewMetric()
|
||||
hist := m.SetEmptyHistogram()
|
||||
hist.SetAggregationTemporality(mode)
|
||||
dps := hist.DataPoints()
|
||||
for id := range dpc {
|
||||
dp := dps.AppendEmpty()
|
||||
dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
|
||||
dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
|
||||
dp.Attributes().PutStr("id", strconv.Itoa(id))
|
||||
dp.ExplicitBounds().FromRaw(bounds[:])
|
||||
|
||||
var obs *state
|
||||
switch mode {
|
||||
case pmetric.AggregationTemporalityDelta:
|
||||
obs = new(state)
|
||||
case pmetric.AggregationTemporalityCumulative:
|
||||
obs = &cumul[id]
|
||||
}
|
||||
|
||||
for i := range obs.counts {
|
||||
v := uint64(rand.IntN(10))
|
||||
obs.counts[i] += v
|
||||
obs.count++
|
||||
obs.sum += float64(v)
|
||||
}
|
||||
|
||||
dp.SetCount(obs.count)
|
||||
dp.SetSum(obs.sum)
|
||||
dp.BucketCounts().FromRaw(obs.counts[:])
|
||||
}
|
||||
return []pmetric.Metric{m}
|
||||
}
|
||||
}(),
|
||||
}, {
|
||||
name: "exponential",
|
||||
data: func() func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
|
||||
type state struct {
|
||||
counts [4]uint64
|
||||
count uint64
|
||||
sum float64
|
||||
}
|
||||
var cumul []state
|
||||
return func(mode pmetric.AggregationTemporality, dpc, epoch int) []pmetric.Metric {
|
||||
if cumul == nil {
|
||||
cumul = make([]state, dpc)
|
||||
}
|
||||
m := pmetric.NewMetric()
|
||||
ex := m.SetEmptyExponentialHistogram()
|
||||
ex.SetAggregationTemporality(mode)
|
||||
dps := ex.DataPoints()
|
||||
for id := range dpc {
|
||||
dp := dps.AppendEmpty()
|
||||
dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
|
||||
dp.SetTimestamp(pcommon.NewTimestampFromTime(start.Add(time.Duration(epoch) * time.Minute)))
|
||||
dp.Attributes().PutStr("id", strconv.Itoa(id))
|
||||
dp.SetScale(2)
|
||||
|
||||
var obs *state
|
||||
switch mode {
|
||||
case pmetric.AggregationTemporalityDelta:
|
||||
obs = new(state)
|
||||
case pmetric.AggregationTemporalityCumulative:
|
||||
obs = &cumul[id]
|
||||
}
|
||||
|
||||
for i := range obs.counts {
|
||||
v := uint64(rand.IntN(10))
|
||||
obs.counts[i] += v
|
||||
obs.count++
|
||||
obs.sum += float64(v)
|
||||
}
|
||||
|
||||
dp.Positive().BucketCounts().FromRaw(obs.counts[:])
|
||||
dp.SetCount(obs.count)
|
||||
dp.SetSum(obs.sum)
|
||||
}
|
||||
|
||||
return []pmetric.Metric{m}
|
||||
}
|
||||
}(),
|
||||
}}
|
||||
|
||||
modes := []struct {
|
||||
name string
|
||||
data func(func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, int) []pmetric.Metric
|
||||
}{{
|
||||
name: "cumulative",
|
||||
data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
|
||||
return data(pmetric.AggregationTemporalityCumulative, 10, epoch)
|
||||
},
|
||||
}, {
|
||||
name: "delta",
|
||||
data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
|
||||
return data(pmetric.AggregationTemporalityDelta, 10, epoch)
|
||||
},
|
||||
}, {
|
||||
name: "mixed",
|
||||
data: func(data func(pmetric.AggregationTemporality, int, int) []pmetric.Metric, epoch int) []pmetric.Metric {
|
||||
cumul := data(pmetric.AggregationTemporalityCumulative, 5, epoch)
|
||||
delta := data(pmetric.AggregationTemporalityDelta, 5, epoch)
|
||||
out := append(cumul, delta...)
|
||||
rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
|
||||
return out
|
||||
},
|
||||
}}
|
||||
|
||||
configs := []struct {
|
||||
name string
|
||||
opts OTLPOptions
|
||||
}{
|
||||
{name: "default"},
|
||||
{name: "convert", opts: OTLPOptions{ConvertDelta: true}},
|
||||
}
|
||||
|
||||
Workers := runtime.GOMAXPROCS(0)
|
||||
for _, cs := range types {
|
||||
for _, mode := range modes {
|
||||
for _, cfg := range configs {
|
||||
b.Run(fmt.Sprintf("type=%s/temporality=%s/cfg=%s", cs.name, mode.name, cfg.name), func(b *testing.B) {
|
||||
if !cfg.opts.ConvertDelta && (mode.name == "delta" || mode.name == "mixed") {
|
||||
b.Skip("not possible")
|
||||
}
|
||||
|
||||
var total int
|
||||
|
||||
// reqs is a [b.N]*http.Request, divided across the workers.
|
||||
// deltatocumulative requires timestamps to be strictly in
|
||||
// order on a per-series basis. to ensure this, each reqs[k]
|
||||
// contains samples of differently named series, sorted
|
||||
// strictly in time order
|
||||
reqs := make([][]*http.Request, Workers)
|
||||
for n := range b.N {
|
||||
k := n % Workers
|
||||
|
||||
md := pmetric.NewMetrics()
|
||||
ms := md.ResourceMetrics().AppendEmpty().
|
||||
ScopeMetrics().AppendEmpty().
|
||||
Metrics()
|
||||
|
||||
for i, m := range mode.data(cs.data, n) {
|
||||
m.SetName(fmt.Sprintf("benchmark_%d_%d", k, i))
|
||||
m.MoveTo(ms.AppendEmpty())
|
||||
}
|
||||
|
||||
total += sampleCount(md)
|
||||
|
||||
ex := pmetricotlp.NewExportRequestFromMetrics(md)
|
||||
data, err := ex.MarshalProto()
|
||||
require.NoError(b, err)
|
||||
|
||||
req, err := http.NewRequest("", "", bytes.NewReader(data))
|
||||
require.NoError(b, err)
|
||||
req.Header.Set("Content-Type", "application/x-protobuf")
|
||||
|
||||
reqs[k] = append(reqs[k], req)
|
||||
}
|
||||
|
||||
log := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn}))
|
||||
mock := new(mockAppendable)
|
||||
appendable := syncAppendable{Appendable: mock, lock: new(sync.Mutex)}
|
||||
cfgfn := func() config.Config {
|
||||
return config.Config{OTLPConfig: config.DefaultOTLPConfig}
|
||||
}
|
||||
handler := NewOTLPWriteHandler(log, nil, appendable, cfgfn, cfg.opts)
|
||||
|
||||
fail := make(chan struct{})
|
||||
done := make(chan struct{})
|
||||
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
|
||||
// we use multiple workers to mimic a real-world scenario
|
||||
// where multiple OTel collectors are sending their
|
||||
// time-series in parallel.
|
||||
// this is necessary to exercise potential lock-contention
|
||||
// in this benchmark
|
||||
for k := range Workers {
|
||||
go func() {
|
||||
rec := httptest.NewRecorder()
|
||||
for _, req := range reqs[k] {
|
||||
handler.ServeHTTP(rec, req)
|
||||
if rec.Result().StatusCode != http.StatusOK {
|
||||
fail <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
done <- struct{}{}
|
||||
}()
|
||||
}
|
||||
|
||||
for range Workers {
|
||||
select {
|
||||
case <-fail:
|
||||
b.FailNow()
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(b, total, len(mock.samples)+len(mock.histograms))
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sampleCount(md pmetric.Metrics) int {
|
||||
var total int
|
||||
rms := md.ResourceMetrics()
|
||||
for i := range rms.Len() {
|
||||
sms := rms.At(i).ScopeMetrics()
|
||||
for i := range sms.Len() {
|
||||
ms := sms.At(i).Metrics()
|
||||
for i := range ms.Len() {
|
||||
m := ms.At(i)
|
||||
switch m.Type() {
|
||||
case pmetric.MetricTypeSum:
|
||||
total += m.Sum().DataPoints().Len()
|
||||
case pmetric.MetricTypeGauge:
|
||||
total += m.Gauge().DataPoints().Len()
|
||||
case pmetric.MetricTypeHistogram:
|
||||
dps := m.Histogram().DataPoints()
|
||||
for i := range dps.Len() {
|
||||
total += dps.At(i).BucketCounts().Len()
|
||||
total++ // le=+Inf series
|
||||
total++ // _sum series
|
||||
total++ // _count series
|
||||
}
|
||||
case pmetric.MetricTypeExponentialHistogram:
|
||||
total += m.ExponentialHistogram().DataPoints().Len()
|
||||
case pmetric.MetricTypeSummary:
|
||||
total += m.Summary().DataPoints().Len()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
type syncAppendable struct {
|
||||
lock sync.Locker
|
||||
storage.Appendable
|
||||
}
|
||||
|
||||
type syncAppender struct {
|
||||
lock sync.Locker
|
||||
storage.Appender
|
||||
}
|
||||
|
||||
func (s syncAppendable) Appender(ctx context.Context) storage.Appender {
|
||||
return syncAppender{Appender: s.Appendable.Appender(ctx), lock: s.lock}
|
||||
}
|
||||
|
||||
func (s syncAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
return s.Appender.Append(ref, l, t, v)
|
||||
}
|
||||
|
||||
func (s syncAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, f *histogram.FloatHistogram) (storage.SeriesRef, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
return s.Appender.AppendHistogram(ref, l, t, h, f)
|
||||
}
|
||||
|
|
|
@ -30,6 +30,8 @@ import (
|
|||
"github.com/grafana/regexp"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
|
||||
common_templates "github.com/prometheus/common/helpers/templates"
|
||||
|
||||
|
@ -166,7 +168,7 @@ func NewTemplateExpander(
|
|||
return html_template.HTML(text)
|
||||
},
|
||||
"match": regexp.MatchString,
|
||||
"title": strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package.
|
||||
"title": cases.Title(language.AmericanEnglish, cases.NoLower).String,
|
||||
"toUpper": strings.ToUpper,
|
||||
"toLower": strings.ToLower,
|
||||
"graphLink": strutil.GraphLinkForExpression,
|
||||
|
|
|
@ -463,7 +463,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
|
|||
return
|
||||
}
|
||||
decoded <- samples
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
histograms := histogramsPool.Get()[:0]
|
||||
histograms, err = dec.HistogramSamples(rec, histograms)
|
||||
if err != nil {
|
||||
|
@ -475,7 +475,7 @@ func (db *DB) loadWAL(r *wlog.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H
|
|||
return
|
||||
}
|
||||
decoded <- histograms
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
floatHistograms := floatHistogramsPool.Get()[:0]
|
||||
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
|
||||
if err != nil {
|
||||
|
@ -1154,19 +1154,39 @@ func (a *appender) log() error {
|
|||
}
|
||||
|
||||
if len(a.pendingHistograms) > 0 {
|
||||
buf = encoder.HistogramSamples(a.pendingHistograms, buf)
|
||||
if err := a.wal.Log(buf); err != nil {
|
||||
return err
|
||||
var customBucketsHistograms []record.RefHistogramSample
|
||||
buf, customBucketsHistograms = encoder.HistogramSamples(a.pendingHistograms, buf)
|
||||
if len(buf) > 0 {
|
||||
if err := a.wal.Log(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:0]
|
||||
}
|
||||
if len(customBucketsHistograms) > 0 {
|
||||
buf = encoder.CustomBucketsHistogramSamples(customBucketsHistograms, nil)
|
||||
if err := a.wal.Log(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:0]
|
||||
}
|
||||
buf = buf[:0]
|
||||
}
|
||||
|
||||
if len(a.pendingFloatHistograms) > 0 {
|
||||
buf = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf)
|
||||
if err := a.wal.Log(buf); err != nil {
|
||||
return err
|
||||
var customBucketsFloatHistograms []record.RefFloatHistogramSample
|
||||
buf, customBucketsFloatHistograms = encoder.FloatHistogramSamples(a.pendingFloatHistograms, buf)
|
||||
if len(buf) > 0 {
|
||||
if err := a.wal.Log(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:0]
|
||||
}
|
||||
if len(customBucketsFloatHistograms) > 0 {
|
||||
buf = encoder.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, nil)
|
||||
if err := a.wal.Log(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:0]
|
||||
}
|
||||
buf = buf[:0]
|
||||
}
|
||||
|
||||
if len(a.pendingExamplars) > 0 {
|
||||
|
|
|
@ -163,6 +163,18 @@ func TestCommit(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
customBucketHistograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), customBucketHistograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -175,6 +187,18 @@ func TestCommit(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
customBucketFloatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), nil, customBucketFloatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
require.NoError(t, s.Close())
|
||||
|
||||
|
@ -206,13 +230,13 @@ func TestCommit(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
walSamplesCount += len(samples)
|
||||
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
var histograms []record.RefHistogramSample
|
||||
histograms, err = dec.HistogramSamples(rec, histograms)
|
||||
require.NoError(t, err)
|
||||
walHistogramCount += len(histograms)
|
||||
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
var floatHistograms []record.RefFloatHistogramSample
|
||||
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
|
||||
require.NoError(t, err)
|
||||
|
@ -229,11 +253,11 @@ func TestCommit(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check that the WAL contained the same number of committed series/samples/exemplars.
|
||||
require.Equal(t, numSeries*3, walSeriesCount, "unexpected number of series")
|
||||
require.Equal(t, numSeries*5, walSeriesCount, "unexpected number of series")
|
||||
require.Equal(t, numSeries*numDatapoints, walSamplesCount, "unexpected number of samples")
|
||||
require.Equal(t, numSeries*numDatapoints, walExemplarsCount, "unexpected number of exemplars")
|
||||
require.Equal(t, numSeries*numHistograms, walHistogramCount, "unexpected number of histograms")
|
||||
require.Equal(t, numSeries*numHistograms, walFloatHistogramCount, "unexpected number of float histograms")
|
||||
require.Equal(t, numSeries*numHistograms*2, walHistogramCount, "unexpected number of histograms")
|
||||
require.Equal(t, numSeries*numHistograms*2, walFloatHistogramCount, "unexpected number of float histograms")
|
||||
}
|
||||
|
||||
func TestRollback(t *testing.T) {
|
||||
|
@ -269,6 +293,18 @@ func TestRollback(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -281,6 +317,18 @@ func TestRollback(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Do a rollback, which should clear uncommitted data. A followup call to
|
||||
// commit should persist nothing to the WAL.
|
||||
require.NoError(t, app.Rollback())
|
||||
|
@ -321,13 +369,13 @@ func TestRollback(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
walExemplarsCount += len(exemplars)
|
||||
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
var histograms []record.RefHistogramSample
|
||||
histograms, err = dec.HistogramSamples(rec, histograms)
|
||||
require.NoError(t, err)
|
||||
walHistogramCount += len(histograms)
|
||||
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
var floatHistograms []record.RefFloatHistogramSample
|
||||
floatHistograms, err = dec.FloatHistogramSamples(rec, floatHistograms)
|
||||
require.NoError(t, err)
|
||||
|
@ -338,7 +386,7 @@ func TestRollback(t *testing.T) {
|
|||
}
|
||||
|
||||
// Check that only series get stored after calling Rollback.
|
||||
require.Equal(t, numSeries*3, walSeriesCount, "series should have been written to WAL")
|
||||
require.Equal(t, numSeries*5, walSeriesCount, "series should have been written to WAL")
|
||||
require.Equal(t, 0, walSamplesCount, "samples should not have been written to WAL")
|
||||
require.Equal(t, 0, walExemplarsCount, "exemplars should not have been written to WAL")
|
||||
require.Equal(t, 0, walHistogramCount, "histograms should not have been written to WAL")
|
||||
|
@ -387,6 +435,19 @@ func TestFullTruncateWAL(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(lastTs), histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -400,11 +461,24 @@ func TestFullTruncateWAL(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(lastTs), nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
// Truncate WAL with mint to GC all the samples.
|
||||
s.truncate(lastTs + 1)
|
||||
|
||||
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
|
||||
require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
|
||||
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
|
||||
}
|
||||
|
||||
func TestPartialTruncateWAL(t *testing.T) {
|
||||
|
@ -414,7 +488,6 @@ func TestPartialTruncateWAL(t *testing.T) {
|
|||
)
|
||||
|
||||
opts := DefaultOptions()
|
||||
opts.TruncateFrequency = time.Minute * 2
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
s := createTestAgentDB(t, reg, opts)
|
||||
|
@ -449,6 +522,19 @@ func TestPartialTruncateWAL(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-1", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram_batch-1", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -462,6 +548,19 @@ func TestPartialTruncateWAL(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-1", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
// Create second batch of 800 series with 1000 data-points with a fixed lastTs as 600.
|
||||
lastTs = 600
|
||||
lbls = labelsForTest(t.Name()+"batch-2", numSeries)
|
||||
|
@ -488,6 +587,19 @@ func TestPartialTruncateWAL(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram_batch-2", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numDatapoints)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram_batch-2", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -501,11 +613,25 @@ func TestPartialTruncateWAL(t *testing.T) {
|
|||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram_batch-2", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numDatapoints)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.NoError(t, app.Commit())
|
||||
}
|
||||
|
||||
// Truncate WAL with mint to GC only the first batch of 800 series and retaining 2nd batch of 800 series.
|
||||
s.truncate(lastTs - 1)
|
||||
|
||||
m := gatherFamily(t, reg, "prometheus_agent_deleted_series")
|
||||
require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
|
||||
require.Len(t, m.Metric, 1)
|
||||
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal truncate mismatch of deleted series count")
|
||||
}
|
||||
|
||||
func TestWALReplay(t *testing.T) {
|
||||
|
@ -541,6 +667,18 @@ func TestWALReplay(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, lastTs, histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -553,6 +691,18 @@ func TestWALReplay(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numHistograms; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, lastTs, nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
require.NoError(t, s.Close())
|
||||
|
||||
|
@ -571,7 +721,7 @@ func TestWALReplay(t *testing.T) {
|
|||
|
||||
// Check if all the series are retrieved back from the WAL.
|
||||
m := gatherFamily(t, reg, "prometheus_agent_active_series")
|
||||
require.Equal(t, float64(numSeries*3), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
|
||||
require.Equal(t, float64(numSeries*5), m.Metric[0].Gauge.GetValue(), "agent wal replay mismatch of active series count")
|
||||
|
||||
// Check if lastTs of the samples retrieved from the WAL is retained.
|
||||
metrics := replayStorage.series.series
|
||||
|
@ -803,6 +953,18 @@ func TestDBAllowOOOSamples(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
|
||||
|
||||
for i := offset; i < numDatapoints+offset; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i-offset], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -815,10 +977,22 @@ func TestDBAllowOOOSamples(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
|
||||
|
||||
for i := offset; i < numDatapoints+offset; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i-offset])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
m := gatherFamily(t, reg, "prometheus_agent_samples_appended_total")
|
||||
require.Equal(t, float64(20), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
|
||||
require.Equal(t, float64(40), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
|
||||
require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
|
||||
require.NoError(t, s.Close())
|
||||
|
||||
// Hack: s.wal.Dir() is the /wal subdirectory of the original storage path.
|
||||
|
@ -867,6 +1041,18 @@ func TestDBAllowOOOSamples(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_histogram", numSeries*2)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
histograms := tsdbutil.GenerateTestCustomBucketsHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), histograms[i], nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_float_histogram", numSeries*2)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
@ -879,10 +1065,22 @@ func TestDBAllowOOOSamples(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
lbls = labelsForTest(t.Name()+"_custom_buckets_float_histogram", numSeries*2)
|
||||
for _, l := range lbls {
|
||||
lset := labels.New(l...)
|
||||
|
||||
floatHistograms := tsdbutil.GenerateTestCustomBucketsFloatHistograms(numHistograms)
|
||||
|
||||
for i := 0; i < numDatapoints; i++ {
|
||||
_, err := app.AppendHistogram(0, lset, int64(i), nil, floatHistograms[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.NoError(t, app.Commit())
|
||||
m = gatherFamily(t, reg2, "prometheus_agent_samples_appended_total")
|
||||
require.Equal(t, float64(40), m.Metric[0].Counter.GetValue(), "agent wal mismatch of total appended samples")
|
||||
require.Equal(t, float64(80), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
|
||||
require.Equal(t, float64(160), m.Metric[1].Counter.GetValue(), "agent wal mismatch of total appended histograms")
|
||||
require.NoError(t, db.Close())
|
||||
}
|
||||
|
||||
|
|
|
@ -221,7 +221,7 @@ type BlockMetaCompaction struct {
|
|||
}
|
||||
|
||||
func (bm *BlockMetaCompaction) SetOutOfOrder() {
|
||||
if bm.containsHint(CompactionHintFromOutOfOrder) {
|
||||
if bm.FromOutOfOrder() {
|
||||
return
|
||||
}
|
||||
bm.Hints = append(bm.Hints, CompactionHintFromOutOfOrder)
|
||||
|
@ -229,16 +229,7 @@ func (bm *BlockMetaCompaction) SetOutOfOrder() {
|
|||
}
|
||||
|
||||
func (bm *BlockMetaCompaction) FromOutOfOrder() bool {
|
||||
return bm.containsHint(CompactionHintFromOutOfOrder)
|
||||
}
|
||||
|
||||
func (bm *BlockMetaCompaction) containsHint(hint string) bool {
|
||||
for _, h := range bm.Hints {
|
||||
if h == hint {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return slices.Contains(bm.Hints, CompactionHintFromOutOfOrder)
|
||||
}
|
||||
|
||||
const (
|
||||
|
|
224
tsdb/db_test.go
224
tsdb/db_test.go
|
@ -4281,6 +4281,188 @@ func TestOOOWALWrite(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
"custom buckets histogram": {
|
||||
appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) {
|
||||
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), tsdbutil.GenerateTestCustomBucketsHistogram(mins), nil)
|
||||
require.NoError(t, err)
|
||||
return seriesRef, nil
|
||||
},
|
||||
expectedOOORecords: []interface{}{
|
||||
// The MmapRef in this are not hand calculated, and instead taken from the test run.
|
||||
// What is important here is the order of records, and that MmapRef increases for each record.
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 1},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestCustomBucketsHistogram(40)},
|
||||
},
|
||||
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 2},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestCustomBucketsHistogram(42)},
|
||||
},
|
||||
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestCustomBucketsHistogram(45)},
|
||||
{Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestCustomBucketsHistogram(35)},
|
||||
},
|
||||
[]record.RefMmapMarker{ // 3rd sample, hence m-mapped.
|
||||
{Ref: 1, MmapRef: 0x100000000 + 8},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestCustomBucketsHistogram(36)},
|
||||
{Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestCustomBucketsHistogram(37)},
|
||||
},
|
||||
|
||||
[]record.RefMmapMarker{ // 3rd sample, hence m-mapped.
|
||||
{Ref: 1, MmapRef: 0x100000000 + 82},
|
||||
},
|
||||
[]record.RefHistogramSample{ // Does not contain the in-order sample here.
|
||||
{Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
|
||||
},
|
||||
|
||||
// Single commit but multiple OOO records.
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 2, MmapRef: 0x100000000 + 160},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
|
||||
{Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestCustomBucketsHistogram(51)},
|
||||
},
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 2, MmapRef: 0x100000000 + 239},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestCustomBucketsHistogram(52)},
|
||||
{Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)},
|
||||
},
|
||||
},
|
||||
expectedInORecords: []interface{}{
|
||||
[]record.RefSeries{
|
||||
{Ref: 1, Labels: s1},
|
||||
{Ref: 2, Labels: s2},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 1, T: minutes(60), H: tsdbutil.GenerateTestCustomBucketsHistogram(60)},
|
||||
{Ref: 2, T: minutes(60), H: tsdbutil.GenerateTestCustomBucketsHistogram(60)},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 1, T: minutes(40), H: tsdbutil.GenerateTestCustomBucketsHistogram(40)},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(42), H: tsdbutil.GenerateTestCustomBucketsHistogram(42)},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(45), H: tsdbutil.GenerateTestCustomBucketsHistogram(45)},
|
||||
{Ref: 1, T: minutes(35), H: tsdbutil.GenerateTestCustomBucketsHistogram(35)},
|
||||
{Ref: 1, T: minutes(36), H: tsdbutil.GenerateTestCustomBucketsHistogram(36)},
|
||||
{Ref: 1, T: minutes(37), H: tsdbutil.GenerateTestCustomBucketsHistogram(37)},
|
||||
},
|
||||
[]record.RefHistogramSample{ // Contains both in-order and ooo sample.
|
||||
{Ref: 1, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
|
||||
{Ref: 2, T: minutes(65), H: tsdbutil.GenerateTestCustomBucketsHistogram(65)},
|
||||
},
|
||||
[]record.RefHistogramSample{
|
||||
{Ref: 2, T: minutes(50), H: tsdbutil.GenerateTestCustomBucketsHistogram(50)},
|
||||
{Ref: 2, T: minutes(51), H: tsdbutil.GenerateTestCustomBucketsHistogram(51)},
|
||||
{Ref: 2, T: minutes(52), H: tsdbutil.GenerateTestCustomBucketsHistogram(52)},
|
||||
{Ref: 2, T: minutes(53), H: tsdbutil.GenerateTestCustomBucketsHistogram(53)},
|
||||
},
|
||||
},
|
||||
},
|
||||
"custom buckets float histogram": {
|
||||
appendSample: func(app storage.Appender, l labels.Labels, mins int64) (storage.SeriesRef, error) {
|
||||
seriesRef, err := app.AppendHistogram(0, l, minutes(mins), nil, tsdbutil.GenerateTestCustomBucketsFloatHistogram(mins))
|
||||
require.NoError(t, err)
|
||||
return seriesRef, nil
|
||||
},
|
||||
expectedOOORecords: []interface{}{
|
||||
// The MmapRef in this are not hand calculated, and instead taken from the test run.
|
||||
// What is important here is the order of records, and that MmapRef increases for each record.
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 1},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(40)},
|
||||
},
|
||||
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 2},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(42)},
|
||||
},
|
||||
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(45)},
|
||||
{Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(35)},
|
||||
},
|
||||
[]record.RefMmapMarker{ // 3rd sample, hence m-mapped.
|
||||
{Ref: 1, MmapRef: 0x100000000 + 8},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(36)},
|
||||
{Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(37)},
|
||||
},
|
||||
|
||||
[]record.RefMmapMarker{ // 3rd sample, hence m-mapped.
|
||||
{Ref: 1, MmapRef: 0x100000000 + 134},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{ // Does not contain the in-order sample here.
|
||||
{Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
|
||||
},
|
||||
|
||||
// Single commit but multiple OOO records.
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 2, MmapRef: 0x100000000 + 263},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
|
||||
{Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(51)},
|
||||
},
|
||||
[]record.RefMmapMarker{
|
||||
{Ref: 2, MmapRef: 0x100000000 + 393},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(52)},
|
||||
{Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)},
|
||||
},
|
||||
},
|
||||
expectedInORecords: []interface{}{
|
||||
[]record.RefSeries{
|
||||
{Ref: 1, Labels: s1},
|
||||
{Ref: 2, Labels: s2},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 1, T: minutes(60), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(60)},
|
||||
{Ref: 2, T: minutes(60), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(60)},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 1, T: minutes(40), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(40)},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(42), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(42)},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(45), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(45)},
|
||||
{Ref: 1, T: minutes(35), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(35)},
|
||||
{Ref: 1, T: minutes(36), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(36)},
|
||||
{Ref: 1, T: minutes(37), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(37)},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{ // Contains both in-order and ooo sample.
|
||||
{Ref: 1, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
|
||||
{Ref: 2, T: minutes(65), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(65)},
|
||||
},
|
||||
[]record.RefFloatHistogramSample{
|
||||
{Ref: 2, T: minutes(50), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(50)},
|
||||
{Ref: 2, T: minutes(51), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(51)},
|
||||
{Ref: 2, T: minutes(52), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(52)},
|
||||
{Ref: 2, T: minutes(53), FH: tsdbutil.GenerateTestCustomBucketsFloatHistogram(53)},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
|
@ -4374,11 +4556,11 @@ func testOOOWALWrite(t *testing.T,
|
|||
markers, err := dec.MmapMarkers(rec, nil)
|
||||
require.NoError(t, err)
|
||||
records = append(records, markers)
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
histogramSamples, err := dec.HistogramSamples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
records = append(records, histogramSamples)
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
floatHistogramSamples, err := dec.FloatHistogramSamples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
records = append(records, floatHistogramSamples)
|
||||
|
@ -6279,6 +6461,32 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario
|
|||
_, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh)
|
||||
return err
|
||||
}
|
||||
case customBucketsIntHistogram:
|
||||
appendFunc = func(app storage.Appender, ts, v int64) error {
|
||||
h := &histogram.Histogram{
|
||||
Schema: -53,
|
||||
Count: uint64(v),
|
||||
Sum: float64(v),
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
|
||||
PositiveBuckets: []int64{v},
|
||||
CustomValues: []float64{float64(1), float64(2), float64(3)},
|
||||
}
|
||||
_, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, h, nil)
|
||||
return err
|
||||
}
|
||||
case customBucketsFloatHistogram:
|
||||
appendFunc = func(app storage.Appender, ts, v int64) error {
|
||||
fh := &histogram.FloatHistogram{
|
||||
Schema: -53,
|
||||
Count: float64(v),
|
||||
Sum: float64(v),
|
||||
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
|
||||
PositiveBuckets: []float64{float64(v)},
|
||||
CustomValues: []float64{float64(1), float64(2), float64(3)},
|
||||
}
|
||||
_, err := app.AppendHistogram(0, labels.FromStrings("foo", "bar1"), ts, nil, fh)
|
||||
return err
|
||||
}
|
||||
case gaugeIntHistogram, gaugeFloatHistogram:
|
||||
return
|
||||
}
|
||||
|
@ -6435,6 +6643,12 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario
|
|||
case floatHistogram:
|
||||
require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i)
|
||||
require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i)
|
||||
case customBucketsIntHistogram:
|
||||
require.Equal(t, tc.expectedSamples[i].hint, s.H().CounterResetHint, "sample %d", i)
|
||||
require.Equal(t, tc.expectedSamples[i].v, int64(s.H().Count), "sample %d", i)
|
||||
case customBucketsFloatHistogram:
|
||||
require.Equal(t, tc.expectedSamples[i].hint, s.FH().CounterResetHint, "sample %d", i)
|
||||
require.Equal(t, tc.expectedSamples[i].v, int64(s.FH().Count), "sample %d", i)
|
||||
default:
|
||||
t.Fatalf("unexpected sample type %s", name)
|
||||
}
|
||||
|
@ -6466,6 +6680,12 @@ func testOOOInterleavedImplicitCounterResets(t *testing.T, name string, scenario
|
|||
case floatHistogram:
|
||||
require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx)
|
||||
require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx)
|
||||
case customBucketsIntHistogram:
|
||||
require.Equal(t, expectHint, s.H().CounterResetHint, "sample %d", idx)
|
||||
require.Equal(t, tc.expectedSamples[idx].v, int64(s.H().Count), "sample %d", idx)
|
||||
case customBucketsFloatHistogram:
|
||||
require.Equal(t, expectHint, s.FH().CounterResetHint, "sample %d", idx)
|
||||
require.Equal(t, tc.expectedSamples[idx].v, int64(s.FH().Count), "sample %d", idx)
|
||||
default:
|
||||
t.Fatalf("unexpected sample type %s", name)
|
||||
}
|
||||
|
|
|
@ -205,13 +205,13 @@ A record with the integer native histograms with the exponential bucketing:
|
|||
│ ├─────────────────────────────────┬─────────────────────────────────┤ │
|
||||
│ │ positive_span_offset_1 <varint> │ positive_span_len_1 <uvarint32> │ │
|
||||
│ ├─────────────────────────────────┴─────────────────────────────────┤ │
|
||||
│ │ . . . │ │
|
||||
│ │ . . . │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ negative_spans_num <uvarint> │ │
|
||||
│ ├───────────────────────────────┬───────────────────────────────────┤ │
|
||||
│ │ negative_span_offset <varint> │ negative_span_len <uvarint32> │ │
|
||||
│ ├───────────────────────────────┴───────────────────────────────────┤ │
|
||||
│ │ . . . │ │
|
||||
│ │ . . . │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ positive_bkts_num <uvarint> │ │
|
||||
│ ├─────────────────────────┬───────┬─────────────────────────────────┤ │
|
||||
|
@ -225,7 +225,7 @@ A record with the integer native histograms with the exponential bucketing:
|
|||
└───────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
A records with the Float histograms:
|
||||
A record with the float native histograms with the exponential bucketing:
|
||||
|
||||
```
|
||||
┌───────────────────────────────────────────────────────────────────────┐
|
||||
|
@ -247,13 +247,13 @@ A records with the Float histograms:
|
|||
│ ├─────────────────────────────────┬─────────────────────────────────┤ │
|
||||
│ │ positive_span_offset_1 <varint> │ positive_span_len_1 <uvarint32> │ │
|
||||
│ ├─────────────────────────────────┴─────────────────────────────────┤ │
|
||||
│ │ . . . │ │
|
||||
│ │ . . . │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ negative_spans_num <uvarint> │ │
|
||||
│ ├───────────────────────────────┬───────────────────────────────────┤ │
|
||||
│ │ negative_span_offset <varint> │ negative_span_len <uvarint32> │ │
|
||||
│ ├───────────────────────────────┴───────────────────────────────────┤ │
|
||||
│ │ . . . │ │
|
||||
│ │ . . . │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ positive_bkts_num <uvarint> │ │
|
||||
│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │
|
||||
|
@ -266,3 +266,85 @@ A records with the Float histograms:
|
|||
│ . . . │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
A record with the integer native histograms with the custom bucketing, also known as NHCB.
|
||||
This record format is backwards compatible with type 7.
|
||||
|
||||
```
|
||||
┌───────────────────────────────────────────────────────────────────────┐
|
||||
│ type = 9 <1b> │
|
||||
├───────────────────────────────────────────────────────────────────────┤
|
||||
│ ┌────────────────────┬───────────────────────────┐ │
|
||||
│ │ id <8b> │ timestamp <8b> │ │
|
||||
│ └────────────────────┴───────────────────────────┘ │
|
||||
│ ┌────────────────────┬──────────────────────────────────────────────┐ │
|
||||
│ │ id_delta <uvarint> │ timestamp_delta <uvarint> │ │
|
||||
│ ├────────────────────┴────┬─────────────────────────────────────────┤ │
|
||||
│ │ counter_reset_hint <1b> │ schema <varint> │ │
|
||||
│ ├─────────────────────────┴────┬────────────────────────────────────┤ │
|
||||
│ │ zero_threshold (float) <8b> │ zero_count <uvarint> │ │
|
||||
│ ├─────────────────┬────────────┴────────────────────────────────────┤ │
|
||||
│ │ count <uvarint> │ sum (float) <8b> │ │
|
||||
│ ├─────────────────┴─────────────────────────────────────────────────┤ │
|
||||
│ │ positive_spans_num <uvarint> │ │
|
||||
│ ├─────────────────────────────────┬─────────────────────────────────┤ │
|
||||
│ │ positive_span_offset_1 <varint> │ positive_span_len_1 <uvarint32> │ │
|
||||
│ ├─────────────────────────────────┴─────────────────────────────────┤ │
|
||||
│ │ . . . │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ negative_spans_num <uvarint> = 0 │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ positive_bkts_num <uvarint> │ │
|
||||
│ ├─────────────────────────┬───────┬─────────────────────────────────┤ │
|
||||
│ │ positive_bkt_1 <varint> │ . . . │ positive_bkt_n <varint> │ │
|
||||
│ ├─────────────────────────┴───────┴─────────────────────────────────┤ │
|
||||
│ │ negative_bkts_num <uvarint> = 0 │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ custom_values_num <uvarint> │ │
|
||||
│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │
|
||||
│ │ custom_value_1 (float) <8b> │ . . . │ custom_value_n (float) <8b> │ │
|
||||
│ └─────────────────────────────┴───────┴─────────────────────────────┘ │
|
||||
│ . . . │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
A record with the float native histograms with the custom bucketing, also known as NHCB.
|
||||
This record format is backwards compatible with type 8.
|
||||
|
||||
```
|
||||
┌───────────────────────────────────────────────────────────────────────┐
|
||||
│ type = 10 <1b> │
|
||||
├───────────────────────────────────────────────────────────────────────┤
|
||||
│ ┌────────────────────┬───────────────────────────┐ │
|
||||
│ │ id <8b> │ timestamp <8b> │ │
|
||||
│ └────────────────────┴───────────────────────────┘ │
|
||||
│ ┌────────────────────┬──────────────────────────────────────────────┐ │
|
||||
│ │ id_delta <uvarint> │ timestamp_delta <uvarint> │ │
|
||||
│ ├────────────────────┴────┬─────────────────────────────────────────┤ │
|
||||
│ │ counter_reset_hint <1b> │ schema <varint> │ │
|
||||
│ ├─────────────────────────┴────┬────────────────────────────────────┤ │
|
||||
│ │ zero_threshold (float) <8b> │ zero_count (float) <8b> │ │
|
||||
│ ├────────────────────┬─────────┴────────────────────────────────────┤ │
|
||||
│ │ count (float) <8b> │ sum (float) <8b> │ │
|
||||
│ ├────────────────────┴──────────────────────────────────────────────┤ │
|
||||
│ │ positive_spans_num <uvarint> │ │
|
||||
│ ├─────────────────────────────────┬─────────────────────────────────┤ │
|
||||
│ │ positive_span_offset_1 <varint> │ positive_span_len_1 <uvarint32> │ │
|
||||
│ ├─────────────────────────────────┴─────────────────────────────────┤ │
|
||||
│ │ . . . │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ negative_spans_num <uvarint> = 0 │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ positive_bkts_num <uvarint> │ │
|
||||
│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │
|
||||
│ │ positive_bkt_1 (float) <8b> │ . . . │ positive_bkt_n (float) <8b> │ │
|
||||
│ ├─────────────────────────────┴───────┴─────────────────────────────┤ │
|
||||
│ │ negative_bkts_num <uvarint> = 0 │ │
|
||||
│ ├───────────────────────────────────────────────────────────────────┤ │
|
||||
│ │ custom_values_num <uvarint> │ │
|
||||
│ ├─────────────────────────────┬───────┬─────────────────────────────┤ │
|
||||
│ │ custom_value_1 (float) <8b> │ . . . │ custom_value_n (float) <8b> │ │
|
||||
│ └─────────────────────────────┴───────┴─────────────────────────────┘ │
|
||||
│ . . . │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
|
|
@ -943,17 +943,37 @@ func (a *headAppender) log() error {
|
|||
}
|
||||
}
|
||||
if len(a.histograms) > 0 {
|
||||
rec = enc.HistogramSamples(a.histograms, buf)
|
||||
var customBucketsHistograms []record.RefHistogramSample
|
||||
rec, customBucketsHistograms = enc.HistogramSamples(a.histograms, buf)
|
||||
buf = rec[:0]
|
||||
if err := a.head.wal.Log(rec); err != nil {
|
||||
return fmt.Errorf("log histograms: %w", err)
|
||||
if len(rec) > 0 {
|
||||
if err := a.head.wal.Log(rec); err != nil {
|
||||
return fmt.Errorf("log histograms: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(customBucketsHistograms) > 0 {
|
||||
rec = enc.CustomBucketsHistogramSamples(customBucketsHistograms, buf)
|
||||
if err := a.head.wal.Log(rec); err != nil {
|
||||
return fmt.Errorf("log custom buckets histograms: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(a.floatHistograms) > 0 {
|
||||
rec = enc.FloatHistogramSamples(a.floatHistograms, buf)
|
||||
var customBucketsFloatHistograms []record.RefFloatHistogramSample
|
||||
rec, customBucketsFloatHistograms = enc.FloatHistogramSamples(a.floatHistograms, buf)
|
||||
buf = rec[:0]
|
||||
if err := a.head.wal.Log(rec); err != nil {
|
||||
return fmt.Errorf("log float histograms: %w", err)
|
||||
if len(rec) > 0 {
|
||||
if err := a.head.wal.Log(rec); err != nil {
|
||||
return fmt.Errorf("log float histograms: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(customBucketsFloatHistograms) > 0 {
|
||||
rec = enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, buf)
|
||||
if err := a.head.wal.Log(rec); err != nil {
|
||||
return fmt.Errorf("log custom buckets float histograms: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Exemplars should be logged after samples (float/native histogram/etc),
|
||||
|
@ -1070,12 +1090,24 @@ func (acc *appenderCommitContext) collectOOORecords(a *headAppender) {
|
|||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
}
|
||||
if len(acc.wblHistograms) > 0 {
|
||||
r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer())
|
||||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
r, customBucketsHistograms := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer())
|
||||
if len(r) > 0 {
|
||||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
}
|
||||
if len(customBucketsHistograms) > 0 {
|
||||
r := acc.enc.CustomBucketsHistogramSamples(customBucketsHistograms, a.head.getBytesBuffer())
|
||||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
}
|
||||
}
|
||||
if len(acc.wblFloatHistograms) > 0 {
|
||||
r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer())
|
||||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
r, customBucketsFloatHistograms := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer())
|
||||
if len(r) > 0 {
|
||||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
}
|
||||
if len(customBucketsFloatHistograms) > 0 {
|
||||
r := acc.enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, a.head.getBytesBuffer())
|
||||
acc.oooRecords = append(acc.oooRecords, r)
|
||||
}
|
||||
}
|
||||
|
||||
acc.wblSamples = nil
|
||||
|
|
|
@ -187,11 +187,11 @@ func readTestWAL(t testing.TB, dir string) (recs []interface{}) {
|
|||
samples, err := dec.Samples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
recs = append(recs, samples)
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
samples, err := dec.HistogramSamples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
recs = append(recs, samples)
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
samples, err := dec.FloatHistogramSamples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
recs = append(recs, samples)
|
||||
|
|
|
@ -187,7 +187,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
|
|||
return
|
||||
}
|
||||
decoded <- exemplars
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
hists := histogramsPool.Get()[:0]
|
||||
hists, err = dec.HistogramSamples(rec, hists)
|
||||
if err != nil {
|
||||
|
@ -199,7 +199,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
|
|||
return
|
||||
}
|
||||
decoded <- hists
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
hists := floatHistogramsPool.Get()[:0]
|
||||
hists, err = dec.FloatHistogramSamples(rec, hists)
|
||||
if err != nil {
|
||||
|
@ -723,7 +723,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
|
|||
return
|
||||
}
|
||||
decodedCh <- markers
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
hists := histogramSamplesPool.Get()[:0]
|
||||
hists, err = dec.HistogramSamples(rec, hists)
|
||||
if err != nil {
|
||||
|
@ -735,7 +735,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch
|
|||
return
|
||||
}
|
||||
decodedCh <- hists
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
hists := floatHistogramSamplesPool.Get()[:0]
|
||||
hists, err = dec.FloatHistogramSamples(rec, hists)
|
||||
if err != nil {
|
||||
|
|
|
@ -863,6 +863,11 @@ func (it *ListPostings) Err() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Len returns the remaining number of postings in the list.
|
||||
func (it *ListPostings) Len() int {
|
||||
return len(it.list)
|
||||
}
|
||||
|
||||
// bigEndianPostings implements the Postings interface over a byte stream of
|
||||
// big endian numbers.
|
||||
type bigEndianPostings struct {
|
||||
|
|
|
@ -1244,63 +1244,78 @@ func TestPostingsWithIndexHeap(t *testing.T) {
|
|||
func TestListPostings(t *testing.T) {
|
||||
t.Run("empty list", func(t *testing.T) {
|
||||
p := NewListPostings(nil)
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
require.False(t, p.Next())
|
||||
require.False(t, p.Seek(10))
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
|
||||
t.Run("one posting", func(t *testing.T) {
|
||||
t.Run("next", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10})
|
||||
require.Equal(t, 1, p.(*ListPostings).Len())
|
||||
require.True(t, p.Next())
|
||||
require.Equal(t, storage.SeriesRef(10), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek less", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10})
|
||||
require.Equal(t, 1, p.(*ListPostings).Len())
|
||||
require.True(t, p.Seek(5))
|
||||
require.Equal(t, storage.SeriesRef(10), p.At())
|
||||
require.True(t, p.Seek(5))
|
||||
require.Equal(t, storage.SeriesRef(10), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek equal", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10})
|
||||
require.Equal(t, 1, p.(*ListPostings).Len())
|
||||
require.True(t, p.Seek(10))
|
||||
require.Equal(t, storage.SeriesRef(10), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek more", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10})
|
||||
require.Equal(t, 1, p.(*ListPostings).Len())
|
||||
require.False(t, p.Seek(15))
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek after next", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10})
|
||||
require.Equal(t, 1, p.(*ListPostings).Len())
|
||||
require.True(t, p.Next())
|
||||
require.False(t, p.Seek(15))
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("multiple postings", func(t *testing.T) {
|
||||
t.Run("next", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10, 20})
|
||||
require.Equal(t, 2, p.(*ListPostings).Len())
|
||||
require.True(t, p.Next())
|
||||
require.Equal(t, storage.SeriesRef(10), p.At())
|
||||
require.True(t, p.Next())
|
||||
require.Equal(t, storage.SeriesRef(20), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10, 20})
|
||||
require.Equal(t, 2, p.(*ListPostings).Len())
|
||||
require.True(t, p.Seek(5))
|
||||
require.Equal(t, storage.SeriesRef(10), p.At())
|
||||
require.True(t, p.Seek(5))
|
||||
|
@ -1315,23 +1330,30 @@ func TestListPostings(t *testing.T) {
|
|||
require.Equal(t, storage.SeriesRef(20), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.NoError(t, p.Err())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek lest than last", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
|
||||
require.Equal(t, 5, p.(*ListPostings).Len())
|
||||
require.True(t, p.Seek(45))
|
||||
require.Equal(t, storage.SeriesRef(50), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek exactly last", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
|
||||
require.Equal(t, 5, p.(*ListPostings).Len())
|
||||
require.True(t, p.Seek(50))
|
||||
require.Equal(t, storage.SeriesRef(50), p.At())
|
||||
require.False(t, p.Next())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
t.Run("seek more than last", func(t *testing.T) {
|
||||
p := NewListPostings([]storage.SeriesRef{10, 20, 30, 40, 50})
|
||||
require.Equal(t, 5, p.(*ListPostings).Len())
|
||||
require.False(t, p.Seek(60))
|
||||
require.False(t, p.Next())
|
||||
require.Equal(t, 0, p.(*ListPostings).Len())
|
||||
})
|
||||
})
|
||||
|
||||
|
|
|
@ -963,7 +963,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "After Series() prev head gets mmapped after getting samples, new head gets new samples also overlapping, none of these should appear in response.",
|
||||
name: "After Series() prev head mmapped after getting samples, new head gets new samples also overlapping, none should appear in response.",
|
||||
queryMinT: minutes(0),
|
||||
queryMaxT: minutes(100),
|
||||
firstInOrderSampleAt: minutes(120),
|
||||
|
|
|
@ -52,6 +52,10 @@ const (
|
|||
HistogramSamples Type = 7
|
||||
// FloatHistogramSamples is used to match WAL records of type Float Histograms.
|
||||
FloatHistogramSamples Type = 8
|
||||
// CustomBucketsHistogramSamples is used to match WAL records of type Histogram with custom buckets.
|
||||
CustomBucketsHistogramSamples Type = 9
|
||||
// CustomBucketsFloatHistogramSamples is used to match WAL records of type Float Histogram with custom buckets.
|
||||
CustomBucketsFloatHistogramSamples Type = 10
|
||||
)
|
||||
|
||||
func (rt Type) String() string {
|
||||
|
@ -68,6 +72,10 @@ func (rt Type) String() string {
|
|||
return "histogram_samples"
|
||||
case FloatHistogramSamples:
|
||||
return "float_histogram_samples"
|
||||
case CustomBucketsHistogramSamples:
|
||||
return "custom_buckets_histogram_samples"
|
||||
case CustomBucketsFloatHistogramSamples:
|
||||
return "custom_buckets_float_histogram_samples"
|
||||
case MmapMarkers:
|
||||
return "mmapmarkers"
|
||||
case Metadata:
|
||||
|
@ -207,7 +215,7 @@ func (d *Decoder) Type(rec []byte) Type {
|
|||
return Unknown
|
||||
}
|
||||
switch t := Type(rec[0]); t {
|
||||
case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples:
|
||||
case Series, Samples, Tombstones, Exemplars, MmapMarkers, Metadata, HistogramSamples, FloatHistogramSamples, CustomBucketsHistogramSamples, CustomBucketsFloatHistogramSamples:
|
||||
return t
|
||||
}
|
||||
return Unknown
|
||||
|
@ -428,7 +436,7 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar
|
|||
func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) ([]RefHistogramSample, error) {
|
||||
dec := encoding.Decbuf{B: rec}
|
||||
t := Type(dec.Byte())
|
||||
if t != HistogramSamples {
|
||||
if t != HistogramSamples && t != CustomBucketsHistogramSamples {
|
||||
return nil, errors.New("invalid record type")
|
||||
}
|
||||
if dec.Len() == 0 {
|
||||
|
@ -505,12 +513,22 @@ func DecodeHistogram(buf *encoding.Decbuf, h *histogram.Histogram) {
|
|||
for i := range h.NegativeBuckets {
|
||||
h.NegativeBuckets[i] = buf.Varint64()
|
||||
}
|
||||
|
||||
if histogram.IsCustomBucketsSchema(h.Schema) {
|
||||
l = buf.Uvarint()
|
||||
if l > 0 {
|
||||
h.CustomValues = make([]float64, l)
|
||||
}
|
||||
for i := range h.CustomValues {
|
||||
h.CustomValues[i] = buf.Be64Float64()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogramSample) ([]RefFloatHistogramSample, error) {
|
||||
dec := encoding.Decbuf{B: rec}
|
||||
t := Type(dec.Byte())
|
||||
if t != FloatHistogramSamples {
|
||||
if t != FloatHistogramSamples && t != CustomBucketsFloatHistogramSamples {
|
||||
return nil, errors.New("invalid record type")
|
||||
}
|
||||
if dec.Len() == 0 {
|
||||
|
@ -587,6 +605,16 @@ func DecodeFloatHistogram(buf *encoding.Decbuf, fh *histogram.FloatHistogram) {
|
|||
for i := range fh.NegativeBuckets {
|
||||
fh.NegativeBuckets[i] = buf.Be64Float64()
|
||||
}
|
||||
|
||||
if histogram.IsCustomBucketsSchema(fh.Schema) {
|
||||
l = buf.Uvarint()
|
||||
if l > 0 {
|
||||
fh.CustomValues = make([]float64, l)
|
||||
}
|
||||
for i := range fh.CustomValues {
|
||||
fh.CustomValues[i] = buf.Be64Float64()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Encoder encodes series, sample, and tombstones records.
|
||||
|
@ -716,10 +744,44 @@ func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte {
|
|||
return buf.Get()
|
||||
}
|
||||
|
||||
func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) []byte {
|
||||
func (e *Encoder) HistogramSamples(histograms []RefHistogramSample, b []byte) ([]byte, []RefHistogramSample) {
|
||||
buf := encoding.Encbuf{B: b}
|
||||
buf.PutByte(byte(HistogramSamples))
|
||||
|
||||
if len(histograms) == 0 {
|
||||
return buf.Get(), nil
|
||||
}
|
||||
var customBucketHistograms []RefHistogramSample
|
||||
|
||||
// Store base timestamp and base reference number of first histogram.
|
||||
// All histograms encode their timestamp and ref as delta to those.
|
||||
first := histograms[0]
|
||||
buf.PutBE64(uint64(first.Ref))
|
||||
buf.PutBE64int64(first.T)
|
||||
|
||||
for _, h := range histograms {
|
||||
if h.H.UsesCustomBuckets() {
|
||||
customBucketHistograms = append(customBucketHistograms, h)
|
||||
continue
|
||||
}
|
||||
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
|
||||
buf.PutVarint64(h.T - first.T)
|
||||
|
||||
EncodeHistogram(&buf, h.H)
|
||||
}
|
||||
|
||||
// Reset buffer if only custom bucket histograms existed in list of histogram samples.
|
||||
if len(histograms) == len(customBucketHistograms) {
|
||||
buf.Reset()
|
||||
}
|
||||
|
||||
return buf.Get(), customBucketHistograms
|
||||
}
|
||||
|
||||
func (e *Encoder) CustomBucketsHistogramSamples(histograms []RefHistogramSample, b []byte) []byte {
|
||||
buf := encoding.Encbuf{B: b}
|
||||
buf.PutByte(byte(CustomBucketsHistogramSamples))
|
||||
|
||||
if len(histograms) == 0 {
|
||||
return buf.Get()
|
||||
}
|
||||
|
@ -772,12 +834,54 @@ func EncodeHistogram(buf *encoding.Encbuf, h *histogram.Histogram) {
|
|||
for _, b := range h.NegativeBuckets {
|
||||
buf.PutVarint64(b)
|
||||
}
|
||||
|
||||
if histogram.IsCustomBucketsSchema(h.Schema) {
|
||||
buf.PutUvarint(len(h.CustomValues))
|
||||
for _, v := range h.CustomValues {
|
||||
buf.PutBEFloat64(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
|
||||
func (e *Encoder) FloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) ([]byte, []RefFloatHistogramSample) {
|
||||
buf := encoding.Encbuf{B: b}
|
||||
buf.PutByte(byte(FloatHistogramSamples))
|
||||
|
||||
if len(histograms) == 0 {
|
||||
return buf.Get(), nil
|
||||
}
|
||||
|
||||
var customBucketsFloatHistograms []RefFloatHistogramSample
|
||||
|
||||
// Store base timestamp and base reference number of first histogram.
|
||||
// All histograms encode their timestamp and ref as delta to those.
|
||||
first := histograms[0]
|
||||
buf.PutBE64(uint64(first.Ref))
|
||||
buf.PutBE64int64(first.T)
|
||||
|
||||
for _, h := range histograms {
|
||||
if h.FH.UsesCustomBuckets() {
|
||||
customBucketsFloatHistograms = append(customBucketsFloatHistograms, h)
|
||||
continue
|
||||
}
|
||||
buf.PutVarint64(int64(h.Ref) - int64(first.Ref))
|
||||
buf.PutVarint64(h.T - first.T)
|
||||
|
||||
EncodeFloatHistogram(&buf, h.FH)
|
||||
}
|
||||
|
||||
// Reset buffer if only custom bucket histograms existed in list of histogram samples
|
||||
if len(histograms) == len(customBucketsFloatHistograms) {
|
||||
buf.Reset()
|
||||
}
|
||||
|
||||
return buf.Get(), customBucketsFloatHistograms
|
||||
}
|
||||
|
||||
func (e *Encoder) CustomBucketsFloatHistogramSamples(histograms []RefFloatHistogramSample, b []byte) []byte {
|
||||
buf := encoding.Encbuf{B: b}
|
||||
buf.PutByte(byte(CustomBucketsFloatHistogramSamples))
|
||||
|
||||
if len(histograms) == 0 {
|
||||
return buf.Get()
|
||||
}
|
||||
|
@ -830,4 +934,11 @@ func EncodeFloatHistogram(buf *encoding.Encbuf, h *histogram.FloatHistogram) {
|
|||
for _, b := range h.NegativeBuckets {
|
||||
buf.PutBEFloat64(b)
|
||||
}
|
||||
|
||||
if histogram.IsCustomBucketsSchema(h.Schema) {
|
||||
buf.PutUvarint(len(h.CustomValues))
|
||||
for _, v := range h.CustomValues {
|
||||
buf.PutBEFloat64(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,13 +15,17 @@
|
|||
package record
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/model/histogram"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
"github.com/prometheus/prometheus/tsdb/encoding"
|
||||
"github.com/prometheus/prometheus/tsdb/tombstones"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
|
@ -148,10 +152,31 @@ func TestRecord_EncodeDecode(t *testing.T) {
|
|||
NegativeBuckets: []int64{1, 2, -1},
|
||||
},
|
||||
},
|
||||
{
|
||||
Ref: 67,
|
||||
T: 5678,
|
||||
H: &histogram.Histogram{
|
||||
Count: 8,
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 35.5,
|
||||
Schema: -53,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 2, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []int64{2, -1, 2, 0},
|
||||
CustomValues: []float64{0, 2, 4, 6, 8},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
decHistograms, err := dec.HistogramSamples(enc.HistogramSamples(histograms, nil), nil)
|
||||
histSamples, customBucketsHistograms := enc.HistogramSamples(histograms, nil)
|
||||
customBucketsHistSamples := enc.CustomBucketsHistogramSamples(customBucketsHistograms, nil)
|
||||
decHistograms, err := dec.HistogramSamples(histSamples, nil)
|
||||
require.NoError(t, err)
|
||||
decCustomBucketsHistograms, err := dec.HistogramSamples(customBucketsHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
decHistograms = append(decHistograms, decCustomBucketsHistograms...)
|
||||
require.Equal(t, histograms, decHistograms)
|
||||
|
||||
floatHistograms := make([]RefFloatHistogramSample, len(histograms))
|
||||
|
@ -162,25 +187,42 @@ func TestRecord_EncodeDecode(t *testing.T) {
|
|||
FH: h.H.ToFloat(nil),
|
||||
}
|
||||
}
|
||||
decFloatHistograms, err := dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil)
|
||||
floatHistSamples, customBucketsFloatHistograms := enc.FloatHistogramSamples(floatHistograms, nil)
|
||||
customBucketsFloatHistSamples := enc.CustomBucketsFloatHistogramSamples(customBucketsFloatHistograms, nil)
|
||||
decFloatHistograms, err := dec.FloatHistogramSamples(floatHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
decCustomBucketsFloatHistograms, err := dec.FloatHistogramSamples(customBucketsFloatHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
decFloatHistograms = append(decFloatHistograms, decCustomBucketsFloatHistograms...)
|
||||
require.Equal(t, floatHistograms, decFloatHistograms)
|
||||
|
||||
// Gauge integer histograms.
|
||||
for i := range histograms {
|
||||
histograms[i].H.CounterResetHint = histogram.GaugeType
|
||||
}
|
||||
decHistograms, err = dec.HistogramSamples(enc.HistogramSamples(histograms, nil), nil)
|
||||
|
||||
gaugeHistSamples, customBucketsGaugeHistograms := enc.HistogramSamples(histograms, nil)
|
||||
customBucketsGaugeHistSamples := enc.CustomBucketsHistogramSamples(customBucketsGaugeHistograms, nil)
|
||||
decGaugeHistograms, err := dec.HistogramSamples(gaugeHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, histograms, decHistograms)
|
||||
decCustomBucketsGaugeHistograms, err := dec.HistogramSamples(customBucketsGaugeHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
decGaugeHistograms = append(decGaugeHistograms, decCustomBucketsGaugeHistograms...)
|
||||
require.Equal(t, histograms, decGaugeHistograms)
|
||||
|
||||
// Gauge float histograms.
|
||||
for i := range floatHistograms {
|
||||
floatHistograms[i].FH.CounterResetHint = histogram.GaugeType
|
||||
}
|
||||
decFloatHistograms, err = dec.FloatHistogramSamples(enc.FloatHistogramSamples(floatHistograms, nil), nil)
|
||||
|
||||
gaugeFloatHistSamples, customBucketsGaugeFloatHistograms := enc.FloatHistogramSamples(floatHistograms, nil)
|
||||
customBucketsGaugeFloatHistSamples := enc.CustomBucketsFloatHistogramSamples(customBucketsGaugeFloatHistograms, nil)
|
||||
decGaugeFloatHistograms, err := dec.FloatHistogramSamples(gaugeFloatHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, floatHistograms, decFloatHistograms)
|
||||
decCustomBucketsGaugeFloatHistograms, err := dec.FloatHistogramSamples(customBucketsGaugeFloatHistSamples, nil)
|
||||
require.NoError(t, err)
|
||||
decGaugeFloatHistograms = append(decGaugeFloatHistograms, decCustomBucketsGaugeFloatHistograms...)
|
||||
require.Equal(t, floatHistograms, decGaugeFloatHistograms)
|
||||
}
|
||||
|
||||
// TestRecord_Corrupted ensures that corrupted records return the correct error.
|
||||
|
@ -263,10 +305,31 @@ func TestRecord_Corrupted(t *testing.T) {
|
|||
PositiveBuckets: []int64{1, 1, -1, 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Ref: 67,
|
||||
T: 5678,
|
||||
H: &histogram.Histogram{
|
||||
Count: 8,
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 35.5,
|
||||
Schema: -53,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 2, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []int64{2, -1, 2, 0},
|
||||
CustomValues: []float64{0, 2, 4, 6, 8},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
corrupted := enc.HistogramSamples(histograms, nil)[:8]
|
||||
_, err := dec.HistogramSamples(corrupted, nil)
|
||||
corruptedHists, customBucketsHists := enc.HistogramSamples(histograms, nil)
|
||||
corruptedHists = corruptedHists[:8]
|
||||
corruptedCustomBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHists, nil)
|
||||
corruptedCustomBucketsHists = corruptedCustomBucketsHists[:8]
|
||||
_, err := dec.HistogramSamples(corruptedHists, nil)
|
||||
require.ErrorIs(t, err, encoding.ErrInvalidSize)
|
||||
_, err = dec.HistogramSamples(corruptedCustomBucketsHists, nil)
|
||||
require.ErrorIs(t, err, encoding.ErrInvalidSize)
|
||||
})
|
||||
}
|
||||
|
@ -308,9 +371,29 @@ func TestRecord_Type(t *testing.T) {
|
|||
PositiveBuckets: []int64{1, 1, -1, 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Ref: 67,
|
||||
T: 5678,
|
||||
H: &histogram.Histogram{
|
||||
Count: 8,
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 35.5,
|
||||
Schema: -53,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 2, Length: 2},
|
||||
},
|
||||
PositiveBuckets: []int64{2, -1, 2, 0},
|
||||
CustomValues: []float64{0, 2, 4, 6, 8},
|
||||
},
|
||||
},
|
||||
}
|
||||
recordType = dec.Type(enc.HistogramSamples(histograms, nil))
|
||||
hists, customBucketsHistograms := enc.HistogramSamples(histograms, nil)
|
||||
recordType = dec.Type(hists)
|
||||
require.Equal(t, HistogramSamples, recordType)
|
||||
customBucketsHists := enc.CustomBucketsHistogramSamples(customBucketsHistograms, nil)
|
||||
recordType = dec.Type(customBucketsHists)
|
||||
require.Equal(t, CustomBucketsHistogramSamples, recordType)
|
||||
|
||||
recordType = dec.Type(nil)
|
||||
require.Equal(t, Unknown, recordType)
|
||||
|
@ -385,3 +468,133 @@ func TestRecord_MetadataDecodeUnknownExtraFields(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, expectedMetadata, decMetadata)
|
||||
}
|
||||
|
||||
type refsCreateFn func(labelCount, histograms, buckets int) ([]RefSeries, []RefSample, []RefHistogramSample)
|
||||
|
||||
type recordsMaker struct {
|
||||
name string
|
||||
make refsCreateFn
|
||||
}
|
||||
|
||||
// BenchmarkWAL_HistogramEncoding measures efficiency of encoding classic
|
||||
// histograms and native historgrams with custom buckets (NHCB).
|
||||
func BenchmarkWAL_HistogramEncoding(b *testing.B) {
|
||||
initClassicRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) {
|
||||
ref := chunks.HeadSeriesRef(0)
|
||||
lbls := map[string]string{}
|
||||
for i := range labelCount {
|
||||
lbls[fmt.Sprintf("l%d", i)] = fmt.Sprintf("v%d", i)
|
||||
}
|
||||
for i := range histograms {
|
||||
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_count", i)
|
||||
series = append(series, RefSeries{
|
||||
Ref: ref,
|
||||
Labels: labels.FromMap(lbls),
|
||||
})
|
||||
floatSamples = append(floatSamples, RefSample{
|
||||
Ref: ref,
|
||||
T: 100,
|
||||
V: float64(i),
|
||||
})
|
||||
ref++
|
||||
|
||||
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_sum", i)
|
||||
series = append(series, RefSeries{
|
||||
Ref: ref,
|
||||
Labels: labels.FromMap(lbls),
|
||||
})
|
||||
floatSamples = append(floatSamples, RefSample{
|
||||
Ref: ref,
|
||||
T: 100,
|
||||
V: float64(i),
|
||||
})
|
||||
ref++
|
||||
|
||||
if buckets == 0 {
|
||||
continue
|
||||
}
|
||||
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d_bucket", i)
|
||||
for j := range buckets {
|
||||
lbls[model.BucketLabel] = fmt.Sprintf("%d.0", j)
|
||||
series = append(series, RefSeries{
|
||||
Ref: ref,
|
||||
Labels: labels.FromMap(lbls),
|
||||
})
|
||||
floatSamples = append(floatSamples, RefSample{
|
||||
Ref: ref,
|
||||
T: 100,
|
||||
V: float64(i + j),
|
||||
})
|
||||
ref++
|
||||
}
|
||||
delete(lbls, model.BucketLabel)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
initNHCBRefs := func(labelCount, histograms, buckets int) (series []RefSeries, floatSamples []RefSample, histSamples []RefHistogramSample) {
|
||||
ref := chunks.HeadSeriesRef(0)
|
||||
lbls := map[string]string{}
|
||||
for i := range labelCount {
|
||||
lbls[fmt.Sprintf("l%d", i)] = fmt.Sprintf("v%d", i)
|
||||
}
|
||||
for i := range histograms {
|
||||
lbls[model.MetricNameLabel] = fmt.Sprintf("series_%d", i)
|
||||
series = append(series, RefSeries{
|
||||
Ref: ref,
|
||||
Labels: labels.FromMap(lbls),
|
||||
})
|
||||
h := &histogram.Histogram{
|
||||
Schema: histogram.CustomBucketsSchema,
|
||||
Count: uint64(i),
|
||||
Sum: float64(i),
|
||||
PositiveSpans: []histogram.Span{{Length: uint32(buckets)}},
|
||||
PositiveBuckets: make([]int64, buckets+1),
|
||||
CustomValues: make([]float64, buckets),
|
||||
}
|
||||
for j := range buckets {
|
||||
h.PositiveBuckets[j] = int64(i + j)
|
||||
}
|
||||
histSamples = append(histSamples, RefHistogramSample{
|
||||
Ref: ref,
|
||||
T: 100,
|
||||
H: h,
|
||||
})
|
||||
ref++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, maker := range []recordsMaker{
|
||||
{
|
||||
name: "classic",
|
||||
make: initClassicRefs,
|
||||
},
|
||||
{
|
||||
name: "nhcb",
|
||||
make: initNHCBRefs,
|
||||
},
|
||||
} {
|
||||
for _, labelCount := range []int{0, 10, 50} {
|
||||
for _, histograms := range []int{10, 100, 1000} {
|
||||
for _, buckets := range []int{0, 1, 10, 100} {
|
||||
b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) {
|
||||
series, samples, nhcbs := maker.make(labelCount, histograms, buckets)
|
||||
enc := Encoder{}
|
||||
for range b.N {
|
||||
var buf []byte
|
||||
enc.Series(series, buf)
|
||||
enc.Samples(samples, buf)
|
||||
var leftOver []RefHistogramSample
|
||||
_, leftOver = enc.HistogramSamples(nhcbs, buf)
|
||||
if len(leftOver) > 0 {
|
||||
enc.CustomBucketsHistogramSamples(leftOver, buf)
|
||||
}
|
||||
b.ReportMetric(float64(len(buf)), "recordBytes/ops")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,11 +29,13 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
float = "float"
|
||||
intHistogram = "integer histogram"
|
||||
floatHistogram = "float histogram"
|
||||
gaugeIntHistogram = "gauge int histogram"
|
||||
gaugeFloatHistogram = "gauge float histogram"
|
||||
float = "float"
|
||||
intHistogram = "integer histogram"
|
||||
floatHistogram = "float histogram"
|
||||
customBucketsIntHistogram = "custom buckets int histogram"
|
||||
customBucketsFloatHistogram = "custom buckets float histogram"
|
||||
gaugeIntHistogram = "gauge int histogram"
|
||||
gaugeFloatHistogram = "gauge float histogram"
|
||||
)
|
||||
|
||||
type testValue struct {
|
||||
|
@ -82,6 +84,28 @@ var sampleTypeScenarios = map[string]sampleTypeScenario{
|
|||
return sample{t: ts, fh: tsdbutil.GenerateTestFloatHistogram(value)}
|
||||
},
|
||||
},
|
||||
customBucketsIntHistogram: {
|
||||
sampleType: sampleMetricTypeHistogram,
|
||||
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
s := sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
|
||||
ref, err := appender.AppendHistogram(0, lbls, ts, s.h, nil)
|
||||
return ref, s, err
|
||||
},
|
||||
sampleFunc: func(ts, value int64) sample {
|
||||
return sample{t: ts, h: tsdbutil.GenerateTestCustomBucketsHistogram(value)}
|
||||
},
|
||||
},
|
||||
customBucketsFloatHistogram: {
|
||||
sampleType: sampleMetricTypeHistogram,
|
||||
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
s := sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
|
||||
ref, err := appender.AppendHistogram(0, lbls, ts, nil, s.fh)
|
||||
return ref, s, err
|
||||
},
|
||||
sampleFunc: func(ts, value int64) sample {
|
||||
return sample{t: ts, fh: tsdbutil.GenerateTestCustomBucketsFloatHistogram(value)}
|
||||
},
|
||||
},
|
||||
gaugeIntHistogram: {
|
||||
sampleType: sampleMetricTypeHistogram,
|
||||
appendFunc: func(appender storage.Appender, lbls labels.Labels, ts, value int64) (storage.SeriesRef, sample, error) {
|
||||
|
|
|
@ -57,6 +57,17 @@ func GenerateTestHistogram(i int64) *histogram.Histogram {
|
|||
}
|
||||
}
|
||||
|
||||
func GenerateTestCustomBucketsHistograms(n int) (r []*histogram.Histogram) {
|
||||
for i := 0; i < n; i++ {
|
||||
h := GenerateTestCustomBucketsHistogram(int64(i))
|
||||
if i > 0 {
|
||||
h.CounterResetHint = histogram.NotCounterReset
|
||||
}
|
||||
r = append(r, h)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func GenerateTestCustomBucketsHistogram(i int64) *histogram.Histogram {
|
||||
return &histogram.Histogram{
|
||||
Count: 5 + uint64(i*4),
|
||||
|
@ -117,6 +128,17 @@ func GenerateTestFloatHistogram(i int64) *histogram.FloatHistogram {
|
|||
}
|
||||
}
|
||||
|
||||
func GenerateTestCustomBucketsFloatHistograms(n int) (r []*histogram.FloatHistogram) {
|
||||
for i := 0; i < n; i++ {
|
||||
h := GenerateTestCustomBucketsFloatHistogram(int64(i))
|
||||
if i > 0 {
|
||||
h.CounterResetHint = histogram.NotCounterReset
|
||||
}
|
||||
r = append(r, h)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func GenerateTestCustomBucketsFloatHistogram(i int64) *histogram.FloatHistogram {
|
||||
return &histogram.FloatHistogram{
|
||||
Count: 5 + float64(i*4),
|
||||
|
|
|
@ -222,11 +222,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
|
|||
}
|
||||
}
|
||||
if len(repl) > 0 {
|
||||
buf = enc.HistogramSamples(repl, buf)
|
||||
buf, _ = enc.HistogramSamples(repl, buf)
|
||||
}
|
||||
stats.TotalSamples += len(histogramSamples)
|
||||
stats.DroppedSamples += len(histogramSamples) - len(repl)
|
||||
case record.CustomBucketsHistogramSamples:
|
||||
histogramSamples, err = dec.HistogramSamples(rec, histogramSamples)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decode histogram samples: %w", err)
|
||||
}
|
||||
// Drop irrelevant histogramSamples in place.
|
||||
repl := histogramSamples[:0]
|
||||
for _, h := range histogramSamples {
|
||||
if h.T >= mint {
|
||||
repl = append(repl, h)
|
||||
}
|
||||
}
|
||||
if len(repl) > 0 {
|
||||
buf = enc.CustomBucketsHistogramSamples(repl, buf)
|
||||
}
|
||||
stats.TotalSamples += len(histogramSamples)
|
||||
stats.DroppedSamples += len(histogramSamples) - len(repl)
|
||||
|
||||
case record.FloatHistogramSamples:
|
||||
floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
|
||||
if err != nil {
|
||||
|
@ -240,11 +256,27 @@ func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.He
|
|||
}
|
||||
}
|
||||
if len(repl) > 0 {
|
||||
buf = enc.FloatHistogramSamples(repl, buf)
|
||||
buf, _ = enc.FloatHistogramSamples(repl, buf)
|
||||
}
|
||||
stats.TotalSamples += len(floatHistogramSamples)
|
||||
stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
|
||||
case record.CustomBucketsFloatHistogramSamples:
|
||||
floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decode float histogram samples: %w", err)
|
||||
}
|
||||
// Drop irrelevant floatHistogramSamples in place.
|
||||
repl := floatHistogramSamples[:0]
|
||||
for _, fh := range floatHistogramSamples {
|
||||
if fh.T >= mint {
|
||||
repl = append(repl, fh)
|
||||
}
|
||||
}
|
||||
if len(repl) > 0 {
|
||||
buf = enc.CustomBucketsFloatHistogramSamples(repl, buf)
|
||||
}
|
||||
stats.TotalSamples += len(floatHistogramSamples)
|
||||
stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
|
||||
|
||||
case record.Tombstones:
|
||||
tstones, err = dec.Tombstones(rec, tstones)
|
||||
if err != nil {
|
||||
|
|
|
@ -127,6 +127,20 @@ func TestCheckpoint(t *testing.T) {
|
|||
PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
|
||||
}
|
||||
}
|
||||
makeCustomBucketHistogram := func(i int) *histogram.Histogram {
|
||||
return &histogram.Histogram{
|
||||
Count: 5 + uint64(i*4),
|
||||
ZeroCount: 2 + uint64(i),
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 18.4 * float64(i+1),
|
||||
Schema: -53,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
CustomValues: []float64{0, 1, 2, 3, 4},
|
||||
}
|
||||
}
|
||||
makeFloatHistogram := func(i int) *histogram.FloatHistogram {
|
||||
return &histogram.FloatHistogram{
|
||||
Count: 5 + float64(i*4),
|
||||
|
@ -141,6 +155,20 @@ func TestCheckpoint(t *testing.T) {
|
|||
PositiveBuckets: []float64{float64(i + 1), 1, -1, 0},
|
||||
}
|
||||
}
|
||||
makeCustomBucketFloatHistogram := func(i int) *histogram.FloatHistogram {
|
||||
return &histogram.FloatHistogram{
|
||||
Count: 5 + float64(i*4),
|
||||
ZeroCount: 2 + float64(i),
|
||||
ZeroThreshold: 0.001,
|
||||
Sum: 18.4 * float64(i+1),
|
||||
Schema: -53,
|
||||
PositiveSpans: []histogram.Span{
|
||||
{Offset: 0, Length: 2},
|
||||
{Offset: 1, Length: 2},
|
||||
},
|
||||
CustomValues: []float64{0, 1, 2, 3, 4},
|
||||
}
|
||||
}
|
||||
|
||||
for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} {
|
||||
t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
|
||||
|
@ -167,7 +195,7 @@ func TestCheckpoint(t *testing.T) {
|
|||
require.NoError(t, w.Close())
|
||||
|
||||
// Start a WAL and write records to it as usual.
|
||||
w, err = NewSize(nil, nil, dir, 64*1024, compress)
|
||||
w, err = NewSize(nil, nil, dir, 128*1024, compress)
|
||||
require.NoError(t, err)
|
||||
|
||||
samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0
|
||||
|
@ -208,7 +236,7 @@ func TestCheckpoint(t *testing.T) {
|
|||
require.NoError(t, w.Log(b))
|
||||
samplesInWAL += 4
|
||||
h := makeHistogram(i)
|
||||
b = enc.HistogramSamples([]record.RefHistogramSample{
|
||||
b, _ = enc.HistogramSamples([]record.RefHistogramSample{
|
||||
{Ref: 0, T: last, H: h},
|
||||
{Ref: 1, T: last + 10000, H: h},
|
||||
{Ref: 2, T: last + 20000, H: h},
|
||||
|
@ -216,8 +244,17 @@ func TestCheckpoint(t *testing.T) {
|
|||
}, nil)
|
||||
require.NoError(t, w.Log(b))
|
||||
histogramsInWAL += 4
|
||||
cbh := makeCustomBucketHistogram(i)
|
||||
b = enc.CustomBucketsHistogramSamples([]record.RefHistogramSample{
|
||||
{Ref: 0, T: last, H: cbh},
|
||||
{Ref: 1, T: last + 10000, H: cbh},
|
||||
{Ref: 2, T: last + 20000, H: cbh},
|
||||
{Ref: 3, T: last + 30000, H: cbh},
|
||||
}, nil)
|
||||
require.NoError(t, w.Log(b))
|
||||
histogramsInWAL += 4
|
||||
fh := makeFloatHistogram(i)
|
||||
b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
|
||||
b, _ = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
|
||||
{Ref: 0, T: last, FH: fh},
|
||||
{Ref: 1, T: last + 10000, FH: fh},
|
||||
{Ref: 2, T: last + 20000, FH: fh},
|
||||
|
@ -225,6 +262,15 @@ func TestCheckpoint(t *testing.T) {
|
|||
}, nil)
|
||||
require.NoError(t, w.Log(b))
|
||||
floatHistogramsInWAL += 4
|
||||
cbfh := makeCustomBucketFloatHistogram(i)
|
||||
b = enc.CustomBucketsFloatHistogramSamples([]record.RefFloatHistogramSample{
|
||||
{Ref: 0, T: last, FH: cbfh},
|
||||
{Ref: 1, T: last + 10000, FH: cbfh},
|
||||
{Ref: 2, T: last + 20000, FH: cbfh},
|
||||
{Ref: 3, T: last + 30000, FH: cbfh},
|
||||
}, nil)
|
||||
require.NoError(t, w.Log(b))
|
||||
floatHistogramsInWAL += 4
|
||||
|
||||
b = enc.Exemplars([]record.RefExemplar{
|
||||
{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i))},
|
||||
|
@ -284,14 +330,14 @@ func TestCheckpoint(t *testing.T) {
|
|||
require.GreaterOrEqual(t, s.T, last/2, "sample with wrong timestamp")
|
||||
}
|
||||
samplesInCheckpoint += len(samples)
|
||||
case record.HistogramSamples:
|
||||
case record.HistogramSamples, record.CustomBucketsHistogramSamples:
|
||||
histograms, err := dec.HistogramSamples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
for _, h := range histograms {
|
||||
require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
|
||||
}
|
||||
histogramsInCheckpoint += len(histograms)
|
||||
case record.FloatHistogramSamples:
|
||||
case record.FloatHistogramSamples, record.CustomBucketsFloatHistogramSamples:
|
||||
floatHistograms, err := dec.FloatHistogramSamples(rec, nil)
|
||||
require.NoError(t, err)
|
||||
for _, h := range floatHistograms {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue