diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index 66a72af53..000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,56 +0,0 @@ -# Configuration for probot-stale - https://github.com/probot/stale - -# Number of days of inactivity before an Issue or Pull Request becomes stale -daysUntilStale: 60 - -# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. -# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. -daysUntilClose: false - -# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) -onlyLabels: [] - -# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable -exemptLabels: - - keepalive - -# Set to true to ignore issues in a project (defaults to false) -exemptProjects: false - -# Set to true to ignore issues in a milestone (defaults to false) -exemptMilestones: false - -# Set to true to ignore issues with an assignee (defaults to false) -exemptAssignees: false - -# Label to use when marking as stale -staleLabel: stale - -# Comment to post when marking as stale. Set to `false` to disable -markComment: false - -# Comment to post when removing the stale label. -# unmarkComment: > -# Your comment here. - -# Comment to post when closing a stale Issue or Pull Request. -# closeComment: > -# Your comment here. - -# Limit the number of actions per hour, from 1-30. Default is 30 -limitPerRun: 30 - -# Limit to only `issues` or `pulls` -only: pulls - -# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': -# pulls: -# daysUntilStale: 30 -# markComment: > -# This pull request has been automatically marked as stale because it has not had -# recent activity. It will be closed if no further activity occurs. Thank you -# for your contributions. 
- -# issues: -# exemptLabels: -# - confirmed diff --git a/.github/workflows/buf-lint.yml b/.github/workflows/buf-lint.yml index cbfeb2ba5..3f6cf76e1 100644 --- a/.github/workflows/buf-lint.yml +++ b/.github/workflows/buf-lint.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 + - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/buf.yml b/.github/workflows/buf.yml index 8b964ef24..632d38cb0 100644 --- a/.github/workflows/buf.yml +++ b/.github/workflows/buf.yml @@ -13,7 +13,7 @@ jobs: if: github.repository_owner == 'prometheus' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 + - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a5a09a98f..5934a3daf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.22-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/setup_environment - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false @@ -28,7 +28,7 @@ jobs: image: quay.io/prometheus/golang-builder:1.22-base steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/setup_environment - run: go test --tags=dedupelabels ./... - run: GOARCH=386 go test ./cmd/prometheus @@ -58,7 +58,7 @@ jobs: steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/setup_environment with: enable_go: false @@ -75,7 +75,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22.x - run: | @@ -115,7 +115,7 @@ jobs: thread: [ 0, 1, 2 ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/build with: promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386" @@ -138,7 +138,7 @@ jobs: # should also be updated. 
steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/build with: parallelism: 12 @@ -162,7 +162,7 @@ jobs: - name: Checkout repository uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: cache: false go-version: 1.22.x @@ -175,18 +175,18 @@ jobs: - name: Checkout repository uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: args: --verbose # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml. - version: v1.59.1 + version: v1.60.2 fuzzing: uses: ./.github/workflows/fuzzing.yml if: github.event_name == 'pull_request' @@ -200,7 +200,7 @@ jobs: if: github.event_name == 'push' && github.event.ref == 'refs/heads/main' steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/publish_main with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -214,7 +214,7 @@ jobs: if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.') steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - uses: ./.github/promci/actions/publish_release with: docker_hub_login: ${{ secrets.docker_hub_login }} @@ -229,9 +229,9 @@ jobs: steps: - name: Checkout uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0 + - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0 - name: Install nodejs - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version-file: "web/ui/.nvmrc" registry-url: "https://registry.npmjs.org" diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 12ffc659c..89aa2ba29 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,12 +27,12 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6 - name: Initialize CodeQL - uses: github/codeql-action/init@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 with: languages: ${{ matrix.language }} - name: Autobuild - uses: 
github/codeql-action/autobuild@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b611370bb5703a7efb587f9d136a52ea24c5c38c # v3.25.11 + uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6 diff --git a/.github/workflows/fuzzing.yml b/.github/workflows/fuzzing.yml index dc510e596..f3953cb2a 100644 --- a/.github/workflows/fuzzing.yml +++ b/.github/workflows/fuzzing.yml @@ -21,7 +21,7 @@ jobs: fuzz-seconds: 600 dry-run: false - name: Upload Crash - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 if: failure() && steps.build.outcome == 'success' with: name: artifacts diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c82fa87a1..82cccb2bc 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -26,7 +26,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # tag=v2.4.0 with: results_file: results.sarif results_format: sarif @@ -37,7 +37,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3 + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4 with: name: SARIF file path: results.sarif @@ -45,6 +45,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@b611370bb5703a7efb587f9d136a52ea24c5c38c # tag=v3.25.11 + uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6 with: sarif_file: results.sarif diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000..d71bcbc9d --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,31 @@ +name: Stale Check +on: + workflow_dispatch: {} + schedule: + - cron: '16 22 * * *' +permissions: + issues: write + pull-requests: write +jobs: + stale: + if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks. + runs-on: ubuntu-latest + steps: + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + # opt out of defaults to avoid marking issues as stale and closing them + # https://github.com/actions/stale#days-before-close + # https://github.com/actions/stale#days-before-stale + days-before-stale: -1 + days-before-close: -1 + # Setting it to empty string to skip comments. 
+ # https://github.com/actions/stale#stale-pr-message + # https://github.com/actions/stale#stale-issue-message + stale-pr-message: '' + stale-issue-message: '' + operations-per-run: 30 + # override days-before-stale, for only marking the pull requests as stale + days-before-pr-stale: 60 + stale-pr-label: stale + exempt-pr-labels: keepalive diff --git a/.golangci.yml b/.golangci.yml index e924fe3d5..303cd33d8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -25,15 +25,34 @@ linters: - loggercheck issues: + max-issues-per-linter: 0 max-same-issues: 0 + # The default exclusions are too aggressive. For one, they + # essentially disable any linting on doc comments. We disable + # default exclusions here and add exclusions fitting our codebase + # further down. + exclude-use-default: false exclude-files: # Skip autogenerated files. - ^.*\.(pb|y)\.go$ exclude-dirs: - # Copied it from a different source + # Copied it from a different source. - storage/remote/otlptranslator/prometheusremotewrite - storage/remote/otlptranslator/prometheus exclude-rules: + - linters: + - errcheck + # Taken from the default exclusions (that are otherwise disabled above). + text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + - linters: + - govet + # We use many Seek methods that do not follow the usual pattern. + text: "stdmethods: method Seek.* should have signature Seek" + - linters: + - revive + # We have stopped at some point to write doc comments on exported symbols. + # TODO(beorn7): Maybe we should enforce this again? There are ~500 offenders right now. + text: exported (.+) should have comment( \(or a comment on this block\))? or be unexported - linters: - gocritic text: "appendAssign" @@ -94,15 +113,14 @@ linters-settings: errorf: false revive: # By default, revive will enable only the linting rules that are named in the configuration file. - # So, it's needed to explicitly set in configuration all required rules. - # The following configuration enables all the rules from the defaults.toml - # https://github.com/mgechev/revive/blob/master/defaults.toml + # So, it's needed to explicitly enable all required rules here. rules: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - name: blank-imports + - name: comment-spacings - name: context-as-argument arguments: - # allow functions with test or bench signatures + # Allow functions with test or bench signatures. - allowTypesBefore: "*testing.T,testing.TB" - name: context-keys-type - name: dot-imports @@ -118,6 +136,8 @@ linters-settings: - name: increment-decrement - name: indent-error-flow - name: package-comments + # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them. + disabled: true - name: range - name: receiver-naming - name: redefines-builtin-id diff --git a/CHANGELOG.md b/CHANGELOG.md index 850554bf9..37cbea6ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,8 +3,59 @@ ## unreleased * [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. 
#14200 -* [FEATURE] Remote-Write: Add sender and receiver support for [Remote Write 2.0-rc.2](https://prometheus.io/docs/specs/remote_write_spec_2_0/) specification #14395 #14427 #14444 -* [ENHANCEMENT] Remote-Write: 1.x messages against Remote Write 2.x Receivers will have now correct values for `prometheus_storage__failed_total` in case of partial errors #14444 +* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706 +* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042 + +## 2.54.1 / 2024-08-27 + +* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685 +* [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654 +* [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538 +* [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514 +* [BUGFIX] PromQL: fix native histograms getting corrupted due to vector selector bug in range queries. #14605 + +## 2.54.0 / 2024-08-09 + +Release 2.54 brings a release candidate of a major new version of [Remote Write: 2.0](https://prometheus.io/docs/specs/remote_write_spec_2_0/). +This is experimental at this time and may still change. +Remote-write v2 is enabled by default, but can be disabled via feature-flag `web.remote-write-receiver.accepted-protobuf-messages`. + +* [CHANGE] Remote-Write: `highest_timestamp_in_seconds` and `queue_highest_sent_timestamp_seconds` metrics now initialized to 0. #14437 +* [CHANGE] API: Split warnings from info annotations in API response. #14327 +* [FEATURE] Remote-Write: Version 2.0 experimental, plus metadata in WAL via feature flag `metadata-wal-records` (defaults on). #14395,#14427,#14444 +* [FEATURE] PromQL: add limitk() and limit_ratio() aggregation operators. #12503 +* [ENHANCEMENT] PromQL: Accept underscores in literal numbers, e.g. 1_000_000 for 1 million. #12821 +* [ENHANCEMENT] PromQL: float literal numbers and durations are now interchangeable (experimental). Example: `time() - my_timestamp > 10m`. #9138 +* [ENHANCEMENT] PromQL: use Kahan summation for sum(). #14074,#14362 +* [ENHANCEMENT] PromQL (experimental native histograms): Optimize `histogram_count` and `histogram_sum` functions. #14097 +* [ENHANCEMENT] TSDB: Better support for out-of-order experimental native histogram samples. #14438 +* [ENHANCEMENT] TSDB: Optimise seek within index. #14393 +* [ENHANCEMENT] TSDB: Optimise deletion of stale series. #14307 +* [ENHANCEMENT] TSDB: Reduce locking to optimise adding and removing series. #13286,#14286 +* [ENHANCEMENT] TSDB: Small optimisation: streamline special handling for out-of-order data. #14396,#14584 +* [ENHANCEMENT] Regexps: Optimize patterns with multiple prefixes. #13843,#14368 +* [ENHANCEMENT] Regexps: Optimize patterns containing multiple literal strings. #14173 +* [ENHANCEMENT] AWS SD: expose Primary IPv6 addresses as __meta_ec2_primary_ipv6_addresses. #14156 +* [ENHANCEMENT] Docker SD: add MatchFirstNetwork for containers with multiple networks. #10490 +* [ENHANCEMENT] OpenStack SD: Use `flavor.original_name` if available. #14312 +* [ENHANCEMENT] UI (experimental native histograms): more accurate representation. #13680,#14430 +* [ENHANCEMENT] Agent: `out_of_order_time_window` config option now applies to agent. 
#14094 +* [ENHANCEMENT] Notifier: Send any outstanding Alertmanager notifications when shutting down. #14290 +* [ENHANCEMENT] Rules: Add label-matcher support to Rules API. #10194 +* [ENHANCEMENT] HTTP API: Add url to message logged on error while sending response. #14209 +* [BUGFIX] TSDB: Exclude OOO chunks mapped after compaction starts (introduced by #14396). #14584 +* [BUGFIX] CLI: escape `|` characters when generating docs. #14420 +* [BUGFIX] PromQL (experimental native histograms): Fix some binary operators between native histogram values. #14454 +* [BUGFIX] TSDB: LabelNames API could fail during compaction. #14279 +* [BUGFIX] TSDB: Fix rare issue where pending OOO read can be left dangling if creating querier fails. #14341 +* [BUGFIX] TSDB: fix check for context cancellation in LabelNamesFor. #14302 +* [BUGFIX] Rules: Fix rare panic on reload. #14366 +* [BUGFIX] Config: In YAML marshalling, do not output a regexp field if it was never set. #14004 +* [BUGFIX] Remote-Write: reject samples with future timestamps. #14304 +* [BUGFIX] Remote-Write: Fix data corruption in remote write if max_sample_age is applied. #14078 +* [BUGFIX] Notifier: Fix Alertmanager discovery not updating under heavy load. #14174 +* [BUGFIX] Regexes: some Unicode characters were not matched by case-insensitive comparison. #14170,#14299 +* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515 ## 2.53.1 / 2024-07-10 diff --git a/Makefile.common b/Makefile.common index e3da72ab4..34d65bb56 100644 --- a/Makefile.common +++ b/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.59.1 +GOLANGCI_LINT_VERSION ?= v1.60.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/VERSION b/VERSION index f419e2c6f..3a40665f5 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.53.1 +2.54.1 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index b4737e628..01c8f336b 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -152,6 +152,7 @@ type flagConfig struct { queryConcurrency int queryMaxSamples int RemoteFlushDeadline model.Duration + nameEscapingScheme string featureList []string memlimitRatio float64 @@ -168,6 +169,8 @@ type flagConfig struct { corsRegexString string promlogConfig promlog.Config + + promqlEnableDelayedNameRemoval bool } // setFeatureListOptions sets the corresponding options from the featureList. @@ -234,6 +237,15 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. 
Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) + case "delayed-compaction": + c.tsdb.EnableDelayedCompaction = true + level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.") + case "promql-delayed-name-removal": + c.promqlEnableDelayedNameRemoval = true + level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.") + case "utf8-names": + model.NameValidationScheme = model.UTF8Validation + level.Info(logger).Log("msg", "Experimental UTF-8 support enabled") case "": continue case "promql-at-modifier", "promql-negative-offset": @@ -293,8 +305,8 @@ func main() { a.Flag("config.file", "Prometheus configuration file path."). Default("prometheus.yml").StringVar(&cfg.configFile) - a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry."). - Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress) + a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated."). + Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses) a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory"). Default("0.9").FloatVar(&cfg.memlimitRatio) @@ -308,7 +320,7 @@ func main() { "Maximum duration before timing out read of the request, and closing idle connections."). Default("5m").SetValue(&cfg.webTimeout) - a.Flag("web.max-connections", "Maximum number of simultaneous connections."). + a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners."). Default("512").IntVar(&cfg.web.MaxConnections) a.Flag("web.external-url", @@ -384,6 +396,9 @@ func main() { serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "[DEPRECATED] This flag has no effect. Overlapping blocks are enabled by default now."). Default("true").Hidden().BoolVar(&b) + serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks."). + Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction) + serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL."). Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression) @@ -478,7 +493,9 @@ func main() { a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates."). Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval) - a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, new-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). + a.Flag("scrape.name-escaping-scheme", `Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. 
Can be one of "values", "underscores", or "dots".`).Default(scrape.DefaultNameEscapingScheme.String()).StringVar(&cfg.nameEscapingScheme) + + a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, new-ui, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details."). Default("").StringsVar(&cfg.featureList) promlogflag.AddFlags(a, &cfg.promlogConfig) @@ -506,6 +523,15 @@ func main() { os.Exit(1) } + if cfg.nameEscapingScheme != "" { + scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme) + if err != nil { + fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme) + os.Exit(1) + } + model.NameEscapingScheme = scheme + } + if agentMode && len(serverOnlyFlags) > 0 { fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags) os.Exit(3) @@ -526,7 +552,7 @@ func main() { localStoragePath = cfg.agentStoragePath } - cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress) + cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddresses[0]) if err != nil { fmt.Fprintln(os.Stderr, fmt.Errorf("parse external URL %q: %w", cfg.prometheusURL, err)) os.Exit(2) @@ -781,9 +807,10 @@ func main() { NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, // EnableAtModifier and EnableNegativeOffset have to be // always on for regular PromQL as of Prometheus v2.33. - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: cfg.enablePerStepStats, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: cfg.enablePerStepStats, + EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval, } queryEngine = promql.NewEngine(opts) @@ -972,15 +999,21 @@ func main() { }) } - listener, err := webHandler.Listener() + listeners, err := webHandler.Listeners() if err != nil { - level.Error(logger).Log("msg", "Unable to start web listener", "err", err) + level.Error(logger).Log("msg", "Unable to start web listeners", "err", err) + if err := queryEngine.Close(); err != nil { + level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + } os.Exit(1) } err = toolkit_web.Validate(*webConfig) if err != nil { level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err) + if err := queryEngine.Close(); err != nil { + level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + } os.Exit(1) } @@ -1002,6 +1035,9 @@ func main() { case <-cancel: reloadReady.Close() } + if err := queryEngine.Close(); err != nil { + level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) + } return nil }, func(err error) { @@ -1269,7 +1305,7 @@ func main() { // Web handler. 
g.Add( func() error { - if err := webHandler.Run(ctxWeb, listener, *webConfig); err != nil { + if err := webHandler.Run(ctxWeb, listeners, *webConfig); err != nil { return fmt.Errorf("error starting web server: %w", err) } return nil @@ -1718,6 +1754,8 @@ type tsdbOptions struct { MaxExemplars int64 EnableMemorySnapshotOnShutdown bool EnableNativeHistograms bool + EnableDelayedCompaction bool + EnableOverlappingCompaction bool } func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { @@ -1738,7 +1776,8 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options { EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, EnableNativeHistograms: opts.EnableNativeHistograms, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, - EnableOverlappingCompaction: true, + EnableDelayedCompaction: opts.EnableDelayedCompaction, + EnableOverlappingCompaction: opts.EnableOverlappingCompaction, } } diff --git a/cmd/promtool/backfill.go b/cmd/promtool/backfill.go index 400cae421..16491f041 100644 --- a/cmd/promtool/backfill.go +++ b/cmd/promtool/backfill.go @@ -85,7 +85,7 @@ func getCompatibleBlockDuration(maxBlockDuration int64) int64 { return blockDuration } -func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool) (returnErr error) { +func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesInAppender int, outputDir string, humanReadable, quiet bool, customLabels map[string]string) (returnErr error) { blockDuration := getCompatibleBlockDuration(maxBlockDuration) mint = blockDuration * (mint / blockDuration) @@ -102,6 +102,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn nextSampleTs int64 = math.MaxInt64 ) + lb := labels.NewBuilder(labels.EmptyLabels()) + for t := mint; t <= maxt; t += blockDuration { tsUpper := t + blockDuration if nextSampleTs != math.MaxInt64 && nextSampleTs >= tsUpper { @@ -162,7 +164,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn l := labels.Labels{} p.Metric(&l) - if _, err := app.Append(0, l, *ts, v); err != nil { + lb.Reset(l) + for name, value := range customLabels { + lb.Set(name, value) + } + lbls := lb.Labels() + + if _, err := app.Append(0, lbls, *ts, v); err != nil { return fmt.Errorf("add sample: %w", err) } @@ -221,13 +229,13 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn return nil } -func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) { +func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) (err error) { p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps. 
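The createBlocks hunk above merges user-supplied labels into every parsed series by reusing a single labels.Builder; the backfill hunk continues below. A self-contained sketch of the same pattern, with made-up series and label values (note that a custom label overwrites a parsed label of the same name):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Stand-in for what the OpenMetrics parser yields for one sample.
	parsed := labels.FromStrings("__name__", "http_requests_total", "code", "200")
	// Stand-in for the --label flags collected by promtool.
	custom := map[string]string{"cluster_id": "123", "org_id": "999"}

	// Same pattern as createBlocks: reset one builder per sample, then
	// overlay the custom labels on top of the parsed ones.
	lb := labels.NewBuilder(labels.EmptyLabels())
	lb.Reset(parsed)
	for name, value := range custom {
		lb.Set(name, value)
	}
	fmt.Println(lb.Labels()) // {__name__="http_requests_total", cluster_id="123", code="200", org_id="999"}
}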
maxt, mint, err := getMinAndMaxTimestamps(p) if err != nil { return fmt.Errorf("getting min and max timestamp: %w", err) } - if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet); err != nil { + if err = createBlocks(input, mint, maxt, int64(maxBlockDuration/time.Millisecond), maxSamplesInAppender, outputDir, humanReadable, quiet, customLabels); err != nil { return fmt.Errorf("block creation: %w", err) } return nil diff --git a/cmd/promtool/backfill_test.go b/cmd/promtool/backfill_test.go index 32abfa46a..b818194e8 100644 --- a/cmd/promtool/backfill_test.go +++ b/cmd/promtool/backfill_test.go @@ -92,6 +92,7 @@ func TestBackfill(t *testing.T) { Description string MaxSamplesInAppender int MaxBlockDuration time.Duration + Labels map[string]string Expected struct { MinTime int64 MaxTime int64 @@ -636,6 +637,49 @@ http_requests_total{code="400"} 1024 7199 }, }, }, + { + ToParse: `# HELP http_requests_total The total number of HTTP requests. +# TYPE http_requests_total counter +http_requests_total{code="200"} 1 1624463088.000 +http_requests_total{code="200"} 2 1629503088.000 +http_requests_total{code="200"} 3 1629863088.000 +# EOF +`, + IsOk: true, + Description: "Sample with external labels.", + MaxSamplesInAppender: 5000, + MaxBlockDuration: 2048 * time.Hour, + Labels: map[string]string{"cluster_id": "123", "org_id": "999"}, + Expected: struct { + MinTime int64 + MaxTime int64 + NumBlocks int + BlockDuration int64 + Samples []backfillSample + }{ + MinTime: 1624463088000, + MaxTime: 1629863088000, + NumBlocks: 2, + BlockDuration: int64(1458 * time.Hour / time.Millisecond), + Samples: []backfillSample{ + { + Timestamp: 1624463088000, + Value: 1, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + { + Timestamp: 1629503088000, + Value: 2, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + { + Timestamp: 1629863088000, + Value: 3, + Labels: labels.FromStrings("__name__", "http_requests_total", "code", "200", "cluster_id", "123", "org_id", "999"), + }, + }, + }, + }, { ToParse: `# HELP rpc_duration_seconds A summary of the RPC duration in seconds. # TYPE rpc_duration_seconds summary @@ -689,7 +733,7 @@ after_eof 1 2 outputDir := t.TempDir() - err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration) + err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration, test.Labels) if !test.IsOk { require.Error(t, err, test.Description) diff --git a/cmd/promtool/main.go b/cmd/promtool/main.go index 1c8e1dd1c..e713a177f 100644 --- a/cmd/promtool/main.go +++ b/cmd/promtool/main.go @@ -253,6 +253,7 @@ func main() { importQuiet := importCmd.Flag("quiet", "Do not print created blocks.").Short('q').Bool() maxBlockDuration := importCmd.Flag("max-block-duration", "Maximum duration created blocks may span. Anything less than 2h is ignored.").Hidden().PlaceHolder("").Duration() openMetricsImportCmd := importCmd.Command("openmetrics", "Import samples from OpenMetrics input and produce TSDB blocks. Please refer to the storage docs for more details.") + openMetricsLabels := openMetricsImportCmd.Flag("label", "Label to attach to metrics. Can be specified multiple times. 
Example --label=label_name=label_value").StringMap() importFilePath := openMetricsImportCmd.Arg("input file", "OpenMetrics file to read samples from.").Required().String() importDBPath := openMetricsImportCmd.Arg("output directory", "Output directory for generated blocks.").Default(defaultDBPath).String() importRulesCmd := importCmd.Command("rules", "Create blocks of data for new recording rules.") @@ -408,7 +409,7 @@ func main() { os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics))) // TODO(aSquare14): Work on adding support for custom block size. case openMetricsImportCmd.FullCommand(): - os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) + os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration, *openMetricsLabels)) case importRulesCmd.FullCommand(): os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) @@ -471,7 +472,7 @@ func (ls lintConfig) lintDuplicateRules() bool { return ls.all || ls.duplicateRules } -// Check server status - healthy & ready. +// CheckServerStatus - healthy & ready. func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error { if serverURL.Scheme == "" { serverURL.Scheme = "http" diff --git a/cmd/promtool/metrics.go b/cmd/promtool/metrics.go index 6d162f459..4c91d1d6f 100644 --- a/cmd/promtool/metrics.go +++ b/cmd/promtool/metrics.go @@ -31,7 +31,7 @@ import ( "github.com/prometheus/prometheus/util/fmtutil" ) -// Push metrics to a prometheus remote write (for testing purpose only). +// PushMetrics to a prometheus remote write (for testing purpose only). 
func PushMetrics(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, timeout time.Duration, labels map[string]string, files ...string) int { addressURL, err := url.Parse(url.String()) if err != nil { diff --git a/cmd/promtool/tsdb.go b/cmd/promtool/tsdb.go index 2ed7244b1..971ea8ab0 100644 --- a/cmd/promtool/tsdb.go +++ b/cmd/promtool/tsdb.go @@ -823,7 +823,7 @@ func checkErr(err error) int { return 0 } -func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int { +func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration, customLabels map[string]string) int { inputFile, err := fileutil.OpenMmapFile(path) if err != nil { return checkErr(err) @@ -834,7 +834,7 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB return checkErr(fmt.Errorf("create output dir: %w", err)) } - return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration)) + return checkErr(backfill(5000, inputFile.Bytes(), outputDir, humanReadable, quiet, maxBlockDuration, customLabels)) } func displayHistogram(dataType string, datas []int, total int) { @@ -866,16 +866,16 @@ func displayHistogram(dataType string, datas []int, total int) { fmt.Println() } -func generateBucket(min, max int) (start, end, step int) { - s := (max - min) / 10 +func generateBucket(minVal, maxVal int) (start, end, step int) { + s := (maxVal - minVal) / 10 step = 10 for step < s && step <= 10000 { step *= 10 } - start = min - min%step - end = max - max%step + step + start = minVal - minVal%step + end = maxVal - maxVal%step + step return } diff --git a/cmd/promtool/tsdb_test.go b/cmd/promtool/tsdb_test.go index 75089b168..ffc5467b4 100644 --- a/cmd/promtool/tsdb_test.go +++ b/cmd/promtool/tsdb_test.go @@ -20,6 +20,7 @@ import ( "math" "os" "runtime" + "slices" "strings" "testing" "time" @@ -152,12 +153,18 @@ func TestTSDBDump(t *testing.T) { expectedMetrics, err := os.ReadFile(tt.expectedDump) require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) - // even though in case of one matcher samples are not sorted, the order in the cases above should stay the same. - require.Equal(t, string(expectedMetrics), dumpedMetrics) + // Sort both, because Prometheus does not guarantee the output order. 
+ require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) }) } } +func sortLines(buf string) string { + lines := strings.Split(buf, "\n") + slices.Sort(lines) + return strings.Join(lines, "\n") +} + func TestTSDBDumpOpenMetrics(t *testing.T) { storage := promqltest.LoadedStorage(t, ` load 1m @@ -169,7 +176,7 @@ func TestTSDBDumpOpenMetrics(t *testing.T) { require.NoError(t, err) expectedMetrics = normalizeNewLine(expectedMetrics) dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics) - require.Equal(t, string(expectedMetrics), dumpedMetrics) + require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics)) } func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { @@ -179,7 +186,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) { dbDir := t.TempDir() // Import samples from OM format - err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour) + err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour, map[string]string{}) require.NoError(t, err) db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) diff --git a/config/config.go b/config/config.go index 8a6216146..c9e8efbf3 100644 --- a/config/config.go +++ b/config/config.go @@ -67,6 +67,11 @@ var ( } ) +const ( + LegacyValidationConfig = "legacy" + UTF8ValidationConfig = "utf8" +) + // Load parses the YAML input s into a Config. func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { cfg := &Config{} @@ -216,6 +221,7 @@ var ( // DefaultRemoteReadConfig is the default remote read configuration. DefaultRemoteReadConfig = RemoteReadConfig{ RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, HTTPClientConfig: config.DefaultHTTPClientConfig, FilterExternalLabels: true, } @@ -446,6 +452,8 @@ type GlobalConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -471,6 +479,7 @@ var ( PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" + UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", @@ -656,6 +665,8 @@ type ScrapeConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
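The cmd/promtool/tsdb.go hunk further up renames generateBucket's min/max parameters, presumably to stop shadowing Go's predeclared min and max builtins; the behavior is unchanged. A worked call, with the function copied from the diff and illustrative inputs:

package main

import "fmt"

// Copied from cmd/promtool/tsdb.go after the rename above: pick a round
// step covering roughly a tenth of the range, then widen [minVal, maxVal]
// to step boundaries.
func generateBucket(minVal, maxVal int) (start, end, step int) {
	s := (maxVal - minVal) / 10
	step = 10
	for step < s && step <= 10000 {
		step *= 10
	}
	start = minVal - minVal%step
	end = maxVal - maxVal%step + step
	return
}

func main() {
	// Range 47..935: a tenth of the span is 88, so the step rounds up to
	// 100 and the bounds widen to the enclosing multiples of 100.
	fmt.Println(generateBucket(47, 935)) // 0 1000 100
}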
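The config changes above add metric_name_validation_scheme at both the global and per-scrape-config level, and the Validate hunk that follows makes an empty per-config value inherit the global one, so an explicit local value always wins (see the scrape_config_local_global_validation_mode testdata). A minimal sketch of that precedence rule, with hypothetical helper and variable names:

package main

import "fmt"

// resolveValidationScheme is an illustrative helper, not part of the diff:
// an empty per-scrape-config scheme inherits the global one, and a
// non-empty local value overrides it.
func resolveValidationScheme(global, local string) string {
	if local == "" {
		return global
	}
	return local
}

func main() {
	fmt.Println(resolveValidationScheme("utf8", ""))       // utf8 (inherited from global)
	fmt.Println(resolveValidationScheme("utf8", "legacy")) // legacy (local override)
}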
@@ -762,6 +773,19 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + switch globalConfig.MetricNameValidationScheme { + case "", LegacyValidationConfig: + case UTF8ValidationConfig: + if model.NameValidationScheme != model.UTF8Validation { + return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names") + } + default: + return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) + } + if c.MetricNameValidationScheme == "" { + c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme + } + return nil } @@ -1090,8 +1114,9 @@ func (m RemoteWriteProtoMsgs) String() string { } var ( - // RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf - // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/. + // RemoteWriteProtoMsgV1 represents the `prometheus.WriteRequest` protobuf + // message introduced in the https://prometheus.io/docs/specs/remote_write_spec/, + // which will eventually be deprecated. // // NOTE: This string is used for both HTTP header values and config value, so don't change // this reference. @@ -1255,13 +1280,20 @@ type MetadataConfig struct { MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"` } +const ( + // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows. + // 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset. + DefaultChunkedReadLimit = 5e+7 +) + // RemoteReadConfig is the configuration for reading from remote storage. type RemoteReadConfig struct { - URL *config.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - ReadRecent bool `yaml:"read_recent,omitempty"` - Name string `yaml:"name,omitempty"` + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + ChunkedReadLimit uint64 `yaml:"chunked_read_limit,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + ReadRecent bool `yaml:"read_recent,omitempty"` + Name string `yaml:"name,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
diff --git a/config/config_test.go b/config/config_test.go index 9b074bef1..221906182 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -16,6 +16,7 @@ package config import ( "crypto/tls" "encoding/json" + "fmt" "net/url" "os" "path/filepath" @@ -164,10 +165,11 @@ var expectedConf = &Config{ RemoteReadConfigs: []*RemoteReadConfig{ { - URL: mustParseURL("http://remote1/read"), - RemoteTimeout: model.Duration(1 * time.Minute), - ReadRecent: true, - Name: "default", + URL: mustParseURL("http://remote1/read"), + RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, + ReadRecent: true, + Name: "default", HTTPClientConfig: config.HTTPClientConfig{ FollowRedirects: true, EnableHTTP2: false, @@ -177,6 +179,7 @@ var expectedConf = &Config{ { URL: mustParseURL("http://remote3/read"), RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, ReadRecent: false, Name: "read_special", RequiredMatchers: model.LabelSet{"job": "special"}, @@ -2300,3 +2303,52 @@ func TestScrapeConfigDisableCompression(t *testing.T) { require.False(t, got.ScrapeConfigs[0].EnableCompression) } + +func TestScrapeConfigNameValidationSettings(t *testing.T) { + model.NameValidationScheme = model.UTF8Validation + defer func() { + model.NameValidationScheme = model.LegacyValidation + }() + + tests := []struct { + name string + inputFile string + expectScheme string + }{ + { + name: "blank config implies default", + inputFile: "scrape_config_default_validation_mode", + expectScheme: "", + }, + { + name: "global setting implies local settings", + inputFile: "scrape_config_global_validation_mode", + expectScheme: "utf8", + }, + { + name: "local setting", + inputFile: "scrape_config_local_validation_mode", + expectScheme: "utf8", + }, + { + name: "local setting overrides global setting", + inputFile: "scrape_config_local_global_validation_mode", + expectScheme: "legacy", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger()) + require.NoError(t, err) + + out, err := yaml.Marshal(want) + + require.NoError(t, err) + got := &Config{} + require.NoError(t, yaml.UnmarshalStrict(out, got)) + + require.Equal(t, tc.expectScheme, got.ScrapeConfigs[0].MetricNameValidationScheme) + }) + } +} diff --git a/config/testdata/scrape_config_default_validation_mode.yml b/config/testdata/scrape_config_default_validation_mode.yml new file mode 100644 index 000000000..96680d643 --- /dev/null +++ b/config/testdata/scrape_config_default_validation_mode.yml @@ -0,0 +1,2 @@ +scrape_configs: + - job_name: prometheus diff --git a/config/testdata/scrape_config_global_validation_mode.yml b/config/testdata/scrape_config_global_validation_mode.yml new file mode 100644 index 000000000..154855439 --- /dev/null +++ b/config/testdata/scrape_config_global_validation_mode.yml @@ -0,0 +1,4 @@ +global: + metric_name_validation_scheme: utf8 +scrape_configs: + - job_name: prometheus diff --git a/config/testdata/scrape_config_local_global_validation_mode.yml b/config/testdata/scrape_config_local_global_validation_mode.yml new file mode 100644 index 000000000..d13605e21 --- /dev/null +++ b/config/testdata/scrape_config_local_global_validation_mode.yml @@ -0,0 +1,5 @@ +global: + metric_name_validation_scheme: utf8 +scrape_configs: + - job_name: prometheus + metric_name_validation_scheme: legacy diff --git a/config/testdata/scrape_config_local_validation_mode.yml 
b/config/testdata/scrape_config_local_validation_mode.yml new file mode 100644 index 000000000..fad423580 --- /dev/null +++ b/config/testdata/scrape_config_local_validation_mode.yml @@ -0,0 +1,3 @@ +scrape_configs: + - job_name: prometheus + metric_name_validation_scheme: utf8 diff --git a/discovery/discoverer_metrics_noop.go b/discovery/discoverer_metrics_noop.go index 638317ace..4321204b6 100644 --- a/discovery/discoverer_metrics_noop.go +++ b/discovery/discoverer_metrics_noop.go @@ -13,7 +13,7 @@ package discovery -// Create a dummy metrics struct, because this SD doesn't have any metrics. +// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics. type NoopDiscovererMetrics struct{} var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil) diff --git a/discovery/discovery.go b/discovery/discovery.go index a5826f817..a91faf6c8 100644 --- a/discovery/discovery.go +++ b/discovery/discovery.go @@ -39,7 +39,7 @@ type Discoverer interface { Run(ctx context.Context, up chan<- []*targetgroup.Group) } -// Internal metrics of service discovery mechanisms. +// DiscovererMetrics are internal metrics of service discovery mechanisms. type DiscovererMetrics interface { Register() error Unregister() @@ -56,7 +56,7 @@ type DiscovererOptions struct { HTTPClientOptions []config.HTTPClientOption } -// Metrics used by the "refresh" package. +// RefreshMetrics are used by the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". type RefreshMetrics struct { @@ -64,17 +64,18 @@ type RefreshMetrics struct { Duration prometheus.Observer } -// Instantiate the metrics used by the "refresh" package. +// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package. type RefreshMetricsInstantiator interface { Instantiate(mech string) *RefreshMetrics } -// An interface for registering, unregistering, and instantiating metrics for the "refresh" package. -// Refresh metrics are registered and unregistered outside of the service discovery mechanism. -// This is so that the same metrics can be reused across different service discovery mechanisms. -// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are -// specific to that SD. However, doing so would also expose too many unused metrics on -// the Prometheus /metrics endpoint. +// RefreshMetricsManager is an interface for registering, unregistering, and +// instantiating metrics for the "refresh" package. Refresh metrics are +// registered and unregistered outside of the service discovery mechanism. This +// is so that the same metrics can be reused across different service discovery +// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to +// use const labels which are specific to that SD. However, doing so would also +// expose too many unused metrics on the Prometheus /metrics endpoint. type RefreshMetricsManager interface { DiscovererMetrics RefreshMetricsInstantiator @@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return staticDiscoverer(c), nil } -// No metrics are needed for this service discovery mechanism. +// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are +// needed for this service discovery mechanism. 
func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { return &NoopDiscovererMetrics{} } diff --git a/discovery/eureka/client.go b/discovery/eureka/client.go index 5a90968f1..52e8ce7b4 100644 --- a/discovery/eureka/client.go +++ b/discovery/eureka/client.go @@ -97,7 +97,6 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode) } diff --git a/discovery/hetzner/robot.go b/discovery/hetzner/robot.go index 64155bfae..516470b05 100644 --- a/discovery/hetzner/robot.go +++ b/discovery/hetzner/robot.go @@ -87,7 +87,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) resp.Body.Close() }() - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode) } diff --git a/discovery/kubernetes/endpoints_test.go b/discovery/kubernetes/endpoints_test.go index e877657db..3ea98c5db 100644 --- a/discovery/kubernetes/endpoints_test.go +++ b/discovery/kubernetes/endpoints_test.go @@ -970,7 +970,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) { }.Run(t) } -// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. +// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes. // See https://github.com/prometheus/prometheus/issues/11305 for more details. func TestEndpointsDiscoveryUpdatePod(t *testing.T) { pod := &v1.Pod{ diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index 552f8a445..50f25a20a 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -154,7 +154,7 @@ func (d k8sDiscoveryTest) Run(t *testing.T) { // readResultWithTimeout reads all targetgroups from channel with timeout. // It merges targetgroups by source and sends the result to result channel. -func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { +func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, maxGroups int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) { res := make(map[string]*targetgroup.Group) timeout := time.After(stopAfter) Loop: @@ -167,7 +167,7 @@ Loop: } res[tg.Source] = tg } - if len(res) == max { + if len(res) == maxGroups { // Reached max target groups we may get, break fast. break Loop } @@ -175,10 +175,10 @@ Loop: // Because we use queue, an object that is created then // deleted or updated may be processed only once. // So possibly we may skip events, timed out here. 
- t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) + t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), maxGroups) break Loop case <-ctx.Done(): - t.Logf("stopped, got %d (max: %d) items", len(res), max) + t.Logf("stopped, got %d (max: %d) items", len(res), maxGroups) break Loop } } diff --git a/discovery/manager.go b/discovery/manager.go index 897d7d151..cefa90a86 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} { return p.config } -// Registers the metrics needed for SD mechanisms. +// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms. // Does not register the metrics for the Discovery Manager. // TODO(ptodev): Add ability to unregister the metrics? func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) { @@ -212,9 +212,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { m.metrics.FailedConfigs.Set(float64(failedCount)) var ( - wg sync.WaitGroup - // keep shows if we keep any providers after reload. - keep bool + wg sync.WaitGroup newProviders []*Provider ) for _, prov := range m.providers { @@ -228,13 +226,12 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { continue } newProviders = append(newProviders, prov) - // refTargets keeps reference targets used to populate new subs' targets + // refTargets keeps reference targets used to populate new subs' targets as they should be the same. var refTargets map[string]*targetgroup.Group prov.mu.Lock() m.targetsMtx.Lock() for s := range prov.subs { - keep = true refTargets = m.targets[poolKey{s, prov.name}] // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { @@ -267,7 +264,9 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { // While startProvider does pull the trigger, it may take some time to do so, therefore // we pull the trigger as soon as possible so that downstream managers can populate their state. // See https://github.com/prometheus/prometheus/pull/8639 for details. - if keep { + // This also helps making the downstream managers drop stale targets as soon as possible. + // See https://github.com/prometheus/prometheus/pull/13147 for details. + if len(m.providers) > 0 { select { case m.triggerSend <- struct{}{}: default: @@ -288,7 +287,9 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D name: {}, }, } + m.mtx.Lock() m.providers = append(m.providers, p) + m.mtx.Unlock() m.startProvider(ctx, p) } @@ -393,8 +394,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { m.targets[poolKey] = make(map[string]*targetgroup.Group) } for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. + // Some Discoverers send nil target group so need to check for it to avoid panics. + if tg == nil { + continue + } + if len(tg.Targets) > 0 { m.targets[poolKey][tg.Source] = tg + } else { + // The target group is empty, drop the corresponding entry to avoid leaks. + // In case the group yielded targets before, allGroups() will take care of making consumers drop them. 
+ delete(m.targets[poolKey], tg.Source) } } } @@ -403,19 +412,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { tSets := map[string][]*targetgroup.Group{} n := map[string]int{} + m.mtx.RLock() m.targetsMtx.Lock() - defer m.targetsMtx.Unlock() - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) + for _, p := range m.providers { + p.mu.RLock() + for s := range p.subs { + // Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers. + // See: https://github.com/prometheus/prometheus/issues/12858 for details. + if _, ok := tSets[s]; !ok { + tSets[s] = []*targetgroup.Group{} + n[s] = 0 + } + if tsets, ok := m.targets[poolKey{s, p.name}]; ok { + for _, tg := range tsets { + tSets[s] = append(tSets[s], tg) + n[s] += len(tg.Targets) + } + } } + p.mu.RUnlock() } + m.targetsMtx.Unlock() + m.mtx.RUnlock() + for setName, v := range n { m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } + return tSets } diff --git a/discovery/manager_test.go b/discovery/manager_test.go index be07edbdb..831cefe51 100644 --- a/discovery/manager_test.go +++ b/discovery/manager_test.go @@ -939,11 +939,13 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) { discoveryManager.ApplyConfig(c) // Original targets should be present as soon as possible. + // An empty list should be sent for prometheus2 to drop any stale targets syncedTargets = <-discoveryManager.SyncCh() mu.Unlock() - require.Len(t, syncedTargets, 1) + require.Len(t, syncedTargets, 2) verifySyncedPresence(t, syncedTargets, "prometheus", "{__address__=\"foo:9090\"}", true) require.Len(t, syncedTargets["prometheus"], 1) + require.Empty(t, syncedTargets["prometheus2"]) // prometheus2 configs should be ready on second sync. syncedTargets = <-discoveryManager.SyncCh() @@ -1049,8 +1051,8 @@ func TestDiscovererConfigs(t *testing.T) { } // TestTargetSetRecreatesEmptyStaticConfigs ensures that reloading a config file after -// removing all targets from the static_configs sends an update with empty targetGroups. -// This is required to signal the receiver that this target set has no current targets. +// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update. +// The update is required to signal the consumers that the previous targets should be dropped. 
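For context on the empty-update contract these hunks establish: consumers of the discovery manager's `SyncCh` treat each update as authoritative per set name, which is why an empty group list must still be sent when a set loses all its targets. A minimal sketch of that consumer-side contract follows; the `Group` type and `applyUpdate` helper are hypothetical stand-ins for what a scrape manager does with a sync payload, not Prometheus code:

```go
package main

import "fmt"

// Group loosely mirrors discovery/targetgroup.Group.
type Group struct {
	Source  string
	Targets []map[string]string
}

type consumer struct {
	targets map[string][]*Group // keyed by set name, e.g. "prometheus"
}

// applyUpdate replaces every set name present in the update wholesale,
// so an empty slice clears all previously known targets for that set.
func (c *consumer) applyUpdate(update map[string][]*Group) {
	for set, groups := range update {
		c.targets[set] = groups // empty slice => stale targets dropped
	}
}

func main() {
	c := &consumer{targets: map[string][]*Group{
		"prometheus2": {{Source: "static/0", Targets: []map[string]string{{"__address__": "foo:9090"}}}},
	}}
	// An empty list for "prometheus2" signals that its targets are gone.
	c.applyUpdate(map[string][]*Group{"prometheus2": {}})
	fmt.Println(len(c.targets["prometheus2"])) // 0: stale targets dropped
}
```

The test that follows exercises exactly this contract.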
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1083,16 +1085,14 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { discoveryManager.ApplyConfig(c) syncedTargets = <-discoveryManager.SyncCh() + require.Len(t, discoveryManager.targets, 1) p = pk("static", "prometheus", 1) targetGroups, ok := discoveryManager.targets[p] - require.True(t, ok, "'%v' should be present in target groups", p) - group, ok := targetGroups[""] - require.True(t, ok, "missing '' key in target groups %v", targetGroups) - - require.Empty(t, group.Targets, "Invalid number of targets.") - require.Len(t, syncedTargets, 1) - require.Len(t, syncedTargets["prometheus"], 1) - require.Nil(t, syncedTargets["prometheus"][0].Labels) + require.True(t, ok, "'%v' should be present in targets", p) + // Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436. + require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p) + require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.") + require.Empty(t, syncedTargets["prometheus"]) } func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { @@ -1275,6 +1275,7 @@ func TestCoordinationWithReceiver(t *testing.T) { Targets: []model.LabelSet{{"__instance__": "1"}}, }, }, + "mock1": {}, }, }, { diff --git a/discovery/metrics_refresh.go b/discovery/metrics_refresh.go index d621165ce..ef49e591a 100644 --- a/discovery/metrics_refresh.go +++ b/discovery/metrics_refresh.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// Metric vectors for the "refresh" package. +// RefreshMetricsVecs are metric vectors for the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". type RefreshMetricsVecs struct { diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go index 11445092e..68f6fe3cc 100644 --- a/discovery/moby/docker.go +++ b/discovery/moby/docker.go @@ -19,6 +19,7 @@ import ( "net", "net/http", "net/url", + "sort", "strconv", "time", @@ -251,28 +252,26 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er } if d.matchFirstNetwork && len(networks) > 1 { - // Match user defined network - if containerNetworkMode.IsUserDefined() { - networkMode := string(containerNetworkMode) - networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]} - } else { - // Get first network if container network mode has "none" value. - // This case appears under certain conditions: - // 1. Container created with network set to "--net=none". - // 2. Disconnect network "none". - // 3. Reconnect network with user defined networks. - var first string - for k, n := range networks { - if n != nil { - first = k - break - } + // Sort networks by name and take first non-nil network.
+ keys := make([]string, 0, len(networks)) + for k, n := range networks { + if n != nil { + keys = append(keys, k) } - networks = map[string]*network.EndpointSettings{first: networks[first]} + } + if len(keys) > 0 { + sort.Strings(keys) + firstNetworkMode := keys[0] + firstNetwork := networks[firstNetworkMode] + networks = map[string]*network.EndpointSettings{firstNetworkMode: firstNetwork} } } for _, n := range networks { + if n == nil { + continue + } + var added bool for _, p := range c.Ports { diff --git a/discovery/moby/docker_test.go b/discovery/moby/docker_test.go index c108ddf58..398393a15 100644 --- a/discovery/moby/docker_test.go +++ b/discovery/moby/docker_test.go @@ -60,9 +60,9 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 6) + require.Len(t, tg.Targets, 8) - for i, lbls := range []model.LabelSet{ + expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", "__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca", @@ -163,7 +163,43 @@ host: %s "__meta_docker_network_scope": "local", "__meta_docker_port_private": "9104", }, - } { + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + } + sortFunc(expected) + sortFunc(tg.Targets) + + for i, lbls := range expected { t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) { require.Equal(t, lbls, tg.Targets[i]) }) @@ -202,13 +238,8 @@ host: %s tg := tgs[0] require.NotNil(t, tg) require.NotNil(t, tg.Targets) - require.Len(t, tg.Targets, 9) + require.Len(t, tg.Targets, 13) - sortFunc := func(labelSets []model.LabelSet) { - sort.Slice(labelSets, func(i, j int) bool { - return labelSets[i]["__address__"] < labelSets[j]["__address__"] - }) - } expected := []model.LabelSet{ { "__address__": "172.19.0.2:9100", @@ -359,6 +390,70 @@ host: %s "__meta_docker_network_scope": "local", "__meta_docker_port_private": 
"9104", }, + { + "__address__": "172.20.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.20.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.20.0.3", + "__meta_docker_network_name": "dockersd_private", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, + { + "__address__": "172.21.0.3:3306", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "3306", + }, + { + "__address__": "172.21.0.3:33060", + "__meta_docker_container_id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "__meta_docker_container_label_com_docker_compose_project": "dockersd", + "__meta_docker_container_label_com_docker_compose_service": "mysql", + "__meta_docker_container_label_com_docker_compose_version": "2.2.2", + "__meta_docker_container_name": "/dockersd_multi_networks", + "__meta_docker_container_network_mode": "dockersd_private_none", + "__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "__meta_docker_network_ingress": "false", + "__meta_docker_network_internal": "false", + "__meta_docker_network_ip": "172.21.0.3", + "__meta_docker_network_name": "dockersd_private1", + "__meta_docker_network_scope": "local", + "__meta_docker_port_private": "33060", + }, } sortFunc(expected) @@ -370,3 +465,9 @@ host: %s }) } } + +func sortFunc(labelSets []model.LabelSet) { + 
sort.Slice(labelSets, func(i, j int) bool { + return labelSets[i]["__address__"] < labelSets[j]["__address__"] + }) +} diff --git a/discovery/moby/testdata/dockerprom/containers/json.json b/discovery/moby/testdata/dockerprom/containers/json.json index ebfc56b6d..33406bf9a 100644 --- a/discovery/moby/testdata/dockerprom/containers/json.json +++ b/discovery/moby/testdata/dockerprom/containers/json.json @@ -228,5 +228,74 @@ "Networks": {} }, "Mounts": [] + }, + { + "Id": "f84b2a0cfaa58d9e70b0657e2b3c6f44f0e973de4163a871299b4acf127b224f", + "Names": [ + "/dockersd_multi_networks" + ], + "Image": "mysql:5.7.29", + "ImageID": "sha256:16ae2f4625ba63a250462bedeece422e741de9f0caf3b1d89fd5b257aca80cd1", + "Command": "mysqld", + "Created": 1616273136, + "Ports": [ + { + "PrivatePort": 3306, + "Type": "tcp" + }, + { + "PrivatePort": 33060, + "Type": "tcp" + } + ], + "Labels": { + "com.docker.compose.project": "dockersd", + "com.docker.compose.service": "mysql", + "com.docker.compose.version": "2.2.2" + }, + "State": "running", + "Status": "Up 40 seconds", + "HostConfig": { + "NetworkMode": "dockersd_private_none" + }, + "NetworkSettings": { + "Networks": { + "dockersd_private": { + "IPAMConfig": null, + "Links": null, + "Aliases": null, + "NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601", + "EndpointID": "972d6807997369605ace863af58de6cb90c787a5bf2ffc4105662d393ae539b7", + "Gateway": "172.20.0.1", + "IPAddress": "172.20.0.3", + "IPPrefixLen": 16, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:14:00:02", + "DriverOpts": null + }, + "dockersd_private1": { + "IPAMConfig": {}, + "Links": null, + "Aliases": [ + "mysql", + "mysql", + "f9ade4b83199" + ], + "NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8", + "EndpointID": "91a98405344ee1cb7d977cafabe634837876651544b32da20a5e0155868e6f5f", + "Gateway": "172.21.0.1", + "IPAddress": "172.21.0.3", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:ac:15:00:02", + "DriverOpts": null + } + } + }, + "Mounts": [] } ] diff --git a/discovery/util.go b/discovery/util.go index 83cc640dd..4e2a08851 100644 --- a/discovery/util.go +++ b/discovery/util.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// A utility to be used by implementations of discovery.Discoverer -// which need to manage the lifetime of their metrics. +// MetricRegisterer is used by implementations of discovery.Discoverer that need +// to manage the lifetime of their metrics. type MetricRegisterer interface { RegisterMetrics() error UnregisterMetrics() @@ -34,7 +34,7 @@ type metricRegistererImpl struct { var _ MetricRegisterer = &metricRegistererImpl{} -// Creates an instance of a MetricRegisterer. +// NewMetricRegisterer creates an instance of a MetricRegisterer. // Typically called inside the implementation of the NewDiscoverer() method. func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { return &metricRegistererImpl{ diff --git a/docs/command-line/prometheus.md b/docs/command-line/prometheus.md index 219a2aa87..87cf5a431 100644 --- a/docs/command-line/prometheus.md +++ b/docs/command-line/prometheus.md @@ -15,11 +15,11 @@ The Prometheus monitoring server | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | | --version | Show application version. | | | --config.file | Prometheus configuration file path. 
| `prometheus.yml` | -| --web.listen-address | Address to listen on for UI, API, and telemetry. | `0.0.0.0:9090` | +| --web.listen-address ... | Address to listen on for UI, API, and telemetry. Can be repeated. | `0.0.0.0:9090` | | --auto-gomemlimit.ratio | The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory | `0.9` | | --web.config.file | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. | | | --web.read-timeout | Maximum duration before timing out read of the request, and closing idle connections. | `5m` | -| --web.max-connections | Maximum number of simultaneous connections. | `512` | +| --web.max-connections | Maximum number of simultaneous connections across all listeners. | `512` | | --web.external-url | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. | | | --web.route-prefix | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. | | | --web.user-assets | Path to static asset directory, available at /user. | | @@ -56,7 +56,8 @@ The Prometheus monitoring server | --query.timeout | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | --query.max-concurrency | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | --query.max-samples | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | -| --enable-feature | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, new-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | +| --scrape.name-escaping-scheme | Method for escaping legacy invalid names when sending to Prometheus that does not support UTF-8. Can be one of "values", "underscores", or "dots". | `values` | +| --enable-feature ... | Comma separated feature names to enable. Valid options: agent, auto-gomaxprocs, auto-gomemlimit, concurrent-rule-eval, created-timestamp-zero-ingestion, delayed-compaction, exemplar-storage, expand-external-labels, extra-scrape-metrics, memory-snapshot-on-shutdown, native-histograms, new-service-discovery-manager, new-ui, no-default-scrape-port, otlp-write-receiver, promql-experimental-functions, promql-delayed-name-removal, promql-per-step-stats, remote-write-receiver (DEPRECATED), utf8-names. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | --log.level | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | --log.format | Output format of log messages. 
One of: [logfmt, json] | `logfmt` | diff --git a/docs/command-line/promtool.md b/docs/command-line/promtool.md index 6bb80169a..6d74200e6 100644 --- a/docs/command-line/promtool.md +++ b/docs/command-line/promtool.md @@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system. | -h, --help | Show context-sensitive help (also try --help-long and --help-man). | | --version | Show application version. | | --experimental | Enable experimental commands. | -| --enable-feature | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | +| --enable-feature ... | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. | @@ -281,7 +281,7 @@ Run series query. | Flag | Description | | --- | --- | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | @@ -309,7 +309,7 @@ Run labels query. | --- | --- | | --start | Start time (RFC3339 or Unix timestamp). | | --end | End time (RFC3339 or Unix timestamp). | -| --match | Series selector. Can be specified multiple times. | +| --match ... | Series selector. Can be specified multiple times. | @@ -338,7 +338,7 @@ Run queries against your Prometheus to analyze the usage pattern of certain metr | --type | Type of metric: histogram. | | | --duration | Time frame to analyze. | `1h` | | --time | Query time (RFC3339 or Unix timestamp), defaults to now. | | -| --match | Series selector. Can be specified multiple times. | | +| --match ... | Series selector. Can be specified multiple times. | | @@ -461,7 +461,7 @@ Unit tests for rules. | Flag | Description | Default | | --- | --- | --- | -| --run | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | +| --run ... | If set, will only run test groups whose names match the regular expression. Can be specified multiple times. | | | --diff | [Experimental] Print colored differential output between expected & received output. | `false` | @@ -578,7 +578,7 @@ Dump samples from a TSDB. | --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -605,7 +605,7 @@ Dump samples from a TSDB. | --sandbox-dir-root | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` | | --min-time | Minimum timestamp to dump. | `-9223372036854775808` | | --max-time | Maximum timestamp to dump. | `9223372036854775807` | -| --match | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | +| --match ... | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` | @@ -641,6 +641,15 @@ Import samples from OpenMetrics input and produce TSDB blocks. 
Please refer to t +###### Flags + +| Flag | Description | +| --- | --- | +| --label | Label to attach to metrics. Can be specified multiple times. Example --label=label_name=label_value | + + + + ###### Arguments | Argument | Description | Default | Required | diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 313a7f2f3..a42126cf2 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -70,7 +70,7 @@ global: # How frequently to evaluate rules. [ evaluation_interval: | default = 1m ] - + # Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received. # Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there's anomalies with scraping. [ rule_query_offset: | default = 0s ] @@ -121,6 +121,11 @@ global: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] + # Specifies the validation scheme for metric and label names. Either blank or + # "legacy" for letters, numbers, colons, and underscores; or "utf8" for full + # UTF-8 support. + [ metric_name_validation_scheme | default "legacy" ] + runtime: # Configure the Go garbage collector GOGC parameter # See: https://tip.golang.org/doc/gc-guide#GOGC @@ -302,6 +307,17 @@ tls_config: [ proxy_connect_header: [ : [, ...] ] ] +# Custom HTTP headers to be sent along with each request. +# Headers that are set by Prometheus itself can't be overwritten. +http_headers: + # Header name. + [ : + # Header values. + [ values: [, ...] ] + # Headers values. Hidden in configuration page. + [ secrets: [, ...] ] + # Files to read header values from. + [ files: [, ...] ] ] # List of Azure service discovery configurations. azure_sd_configs: @@ -461,6 +477,11 @@ metric_relabel_configs: # that will be kept in memory. 0 means no limit. [ keep_dropped_targets: | default = 0 ] +# Specifies the validation scheme for metric and label names. Either blank or +# "legacy" for letters, numbers, colons, and underscores; or "utf8" for full +# UTF-8 support. +[ metric_name_validation_scheme | default "legacy" ] + # Limit on total number of positive and negative buckets allowed in a single # native histogram. The resolution of a histogram with more buckets will be # reduced until the number of buckets is within the limit. If the limit cannot @@ -947,7 +968,9 @@ tls_config: # The host to use if the container is in host networking mode. [ host_networking_host: | default = "localhost" ] -# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets. +# Sort all non-nil networks in ascending order based on network name and +# get the first network if the container has multiple networks defined, +# thus avoiding collecting duplicate targets. [ match_first_network: | default = true ] # Optional filters to limit the discovery process to a subset of available @@ -3265,12 +3288,16 @@ Initially, aside from the configured per-target labels, a target's `job` label is set to the `job_name` value of the respective scrape configuration. The `__address__` label is set to the `:` address of the target. After relabeling, the `instance` label is set to the value of `__address__` by default if -it was not set during relabeling. The `__scheme__` and `__metrics_path__` labels -are set to the scheme and metrics path of the target respectively. 
The `__param_<name>` -label is set to the value of the first passed URL parameter called `<name>`. +it was not set during relabeling. + +The `__scheme__` and `__metrics_path__` labels +are set to the scheme and metrics path of the target respectively, as specified in `scrape_config`. + +The `__param_<name>` +label is set to the value of the first passed URL parameter called `<name>`, as defined in `scrape_config`. The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's -interval and timeout. +interval and timeout, as specified in `scrape_config`. Additional labels prefixed with `__meta_` may be available during the relabeling phase. They are set by the service discovery mechanism that provided diff --git a/docs/feature_flags.md b/docs/feature_flags.md index 2c0ebd8c2..c9289e445 100644 --- a/docs/feature_flags.md +++ b/docs/feature_flags.md @@ -200,8 +200,9 @@ won't work when you push OTLP metrics. `--enable-feature=promql-experimental-functions` -Enables PromQL functions that are considered experimental and whose name or -semantics could change. +Enables PromQL functions that are considered experimental. These functions +might change their name, syntax, or semantics. They might also get removed +entirely. ## Created Timestamps Zero Injection @@ -240,3 +241,33 @@ metadata changes as WAL records on a per-series basis. This must be used if you are also using remote write 2.0 as it will only gather metadata from the WAL. + +## Delay compaction start time + +`--enable-feature=delayed-compaction` + +A random offset, up to `10%` of the chunk range, is added to the Head compaction start time. This assists Prometheus instances in avoiding simultaneous compactions and reduces the load on shared resources. + +Only auto Head compactions and the operations directly resulting from them are subject to this delay. + +In the event of multiple consecutive Head compactions being possible, only the first compaction experiences this delay. + +Note that during this delay, the Head continues its usual operations, which include serving and appending series. + +Despite the delay in compaction, the blocks produced are time-aligned in the same manner as they would be if the delay was not in place. + +## Delay __name__ label removal for PromQL engine + +`--enable-feature=promql-delayed-name-removal` + +When enabled, Prometheus will change the way in which the `__name__` label is removed from PromQL query results (for functions and expressions for which this is necessary). Specifically, it will delay the removal to the last step of the query evaluation, instead of every time an expression or function creating derived metrics is evaluated. + +This allows optionally preserving the `__name__` label via the `label_replace` and `label_join` functions, and helps prevent the "vector cannot contain metrics with the same labelset" error, which can happen when applying a regex-matcher to the `__name__` label. + +## UTF-8 Name Support + +`--enable-feature=utf8-names` + +When enabled, changes the metric and label name validation scheme inside Prometheus to allow the full UTF-8 character set. +By itself, this flag does not enable the request of UTF-8 names via content negotiation. +Users will also have to set `metric_name_validation_scheme` to enable the feature, either in the global config or on a per-scrape-config basis.
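To make the difference between the two validation schemes concrete, here is a rough sketch. The `legacyNameRE` regexp encodes the legacy charset documented above (letters, numbers, colons, underscores); this is an illustration only, not the prometheus/common implementation:

```go
package main

import (
	"fmt"
	"regexp"
	"unicode/utf8"
)

// Legacy metric names: [a-zA-Z_:][a-zA-Z0-9_:]*
var legacyNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)

// isValidName mimics the documented semantics of metric_name_validation_scheme:
// "utf8" accepts any non-empty valid UTF-8 string, "legacy" (or blank)
// restricts names to the classic charset.
func isValidName(name, scheme string) bool {
	if scheme == "utf8" {
		return name != "" && utf8.ValidString(name)
	}
	return legacyNameRE.MatchString(name)
}

func main() {
	fmt.Println(isValidName("http_requests_total", "legacy")) // true
	fmt.Println(isValidName("http.requests.total", "legacy")) // false: dots
	fmt.Println(isValidName("http.requests.total", "utf8"))   // true
}
```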
diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 304c9f07d..81ffb4e0f 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -41,7 +41,7 @@ vector is the only type which can be graphed. _Notes about the experimental native histograms:_ * Ingesting native histograms has to be enabled via a [feature - flag](../../feature_flags.md#native-histograms). + flag](../feature_flags.md#native-histograms). * Once native histograms have been ingested into the TSDB (and even after disabling the feature flag again), both instant vectors and range vectors may now contain samples that aren't simple floating point numbers (float samples) diff --git a/docs/querying/functions.md b/docs/querying/functions.md index de65e693d..e13628c5c 100644 --- a/docs/querying/functions.md +++ b/docs/querying/functions.md @@ -617,9 +617,9 @@ Like `sort`, `sort_desc` only affects the results of instant queries, as range q ## `sort_by_label()` -**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** -`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by their label values and sample value in case of label values being equal, in ascending order. +`sort_by_label(v instant-vector, label string, ...)` returns vector elements sorted by the values of the given labels in ascending order. In case these label values are equal, elements are sorted by their full label sets. Please note that the sort by label functions only affect the results of instant queries, as range query results always have a fixed output ordering. @@ -627,7 +627,7 @@ This function uses [natural sort order](https://en.wikipedia.org/wiki/Natural_so ## `sort_by_label_desc()` -**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.** +**This function has to be enabled via the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions`.** Same as `sort_by_label`, but sorts in descending order. @@ -676,7 +676,7 @@ over time and return an instant vector with per-series aggregation results: * `last_over_time(range-vector)`: the most recent point value in the specified interval. * `present_over_time(range-vector)`: the value 1 for any series in the specified interval. 
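The reworded `sort_by_label` semantics above boil down to a compare-then-tie-break ordering: compare the requested labels in sequence, and only fall back to the full label set on ties. A simplified sketch follows; the `series` type is a hypothetical stand-in, and Prometheus's natural sort order (where "2" < "10") is reduced to plain string comparison here:

```go
package main

import (
	"fmt"
	"sort"
)

type series map[string]string // label name -> value

// sortByLabel orders series by the given labels in sequence, ascending,
// breaking ties on the full label set (Go prints map keys sorted, so
// fmt.Sprint gives a deterministic tie-break key).
func sortByLabel(ss []series, labels ...string) {
	sort.SliceStable(ss, func(i, j int) bool {
		for _, l := range labels {
			if ss[i][l] != ss[j][l] {
				return ss[i][l] < ss[j][l]
			}
		}
		return fmt.Sprint(ss[i]) < fmt.Sprint(ss[j])
	})
}

func main() {
	ss := []series{
		{"job": "b", "instance": "1"},
		{"job": "a", "instance": "2"},
		{"job": "a", "instance": "1"},
	}
	sortByLabel(ss, "job")
	fmt.Println(ss) // job "a" entries first; their tie broken by full label set
}
```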
-If the [feature flag](../feature_flags/) +If the [feature flag](../feature_flags.md#experimental-promql-functions) `--enable-feature=promql-experimental-functions` is set, the following additional functions are available: diff --git a/documentation/examples/remote_storage/example_write_adapter/README.md b/documentation/examples/remote_storage/example_write_adapter/README.md index 739cf3be3..968d2b25c 100644 --- a/documentation/examples/remote_storage/example_write_adapter/README.md +++ b/documentation/examples/remote_storage/example_write_adapter/README.md @@ -19,7 +19,7 @@ remote_write: protobuf_message: "io.prometheus.write.v2.Request" ``` -or for deprecated Remote Write 1.0 message: +or for the eventually deprecated Remote Write 1.0 message: ```yaml remote_write: diff --git a/documentation/examples/remote_storage/go.mod b/documentation/examples/remote_storage/go.mod index 4c41a6606..e5e052469 100644 --- a/documentation/examples/remote_storage/go.mod +++ b/documentation/examples/remote_storage/go.mod @@ -8,19 +8,19 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/influxdata/influxdb v1.11.5 - github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/client_golang v1.20.0 + github.com/prometheus/common v0.57.0 github.com/prometheus/prometheus v0.53.1 github.com/stretchr/testify v1.9.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aws/aws-sdk-go v1.53.16 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -35,8 +35,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/kr/text v0.2.0 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -56,14 +55,14 @@ require ( go.opentelemetry.io/otel/trace v1.27.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/grpc v1.65.0 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/documentation/examples/remote_storage/go.sum 
b/documentation/examples/remote_storage/go.sum index 9898d75d7..34c474ef8 100644 --- a/documentation/examples/remote_storage/go.sum +++ b/documentation/examples/remote_storage/go.sum @@ -1,9 +1,9 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= @@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc= -github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -37,9 +37,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= +github.com/cncf/xds/go 
v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -50,8 +49,6 @@ github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LO github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -190,8 +187,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -256,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= +github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -267,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod 
h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY= +github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -279,8 +276,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc= github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -326,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -347,8 +344,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net 
v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= @@ -376,11 +373,11 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -405,8 +402,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1: google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/go.mod b/go.mod index c74452cd5..9a4a35c4d 100644 --- a/go.mod +++ b/go.mod @@ -13,15 +13,15 @@ require ( github.com/KimMachineGun/automemlimit v0.6.1 github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 - github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go v1.55.5 github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 github.com/cespare/xxhash/v2 v2.3.0 github.com/dennwc/varint v1.0.0 
github.com/digitalocean/godo v1.119.0 - github.com/docker/docker v27.0.3+incompatible + github.com/docker/docker v27.1.1+incompatible github.com/edsrzf/mmap-go v1.1.0 github.com/envoyproxy/go-control-plane v0.12.0 - github.com/envoyproxy/protoc-gen-validate v1.0.4 + github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/fsnotify/fsnotify v1.7.0 github.com/go-kit/log v0.2.1 @@ -36,10 +36,10 @@ require ( github.com/gophercloud/gophercloud v1.14.0 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/hashicorp/consul/api v1.29.2 + github.com/hashicorp/consul/api v1.29.4 github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 - github.com/hetznercloud/hcloud-go/v2 v2.12.0 - github.com/ionos-cloud/sdk-go/v6 v6.1.11 + github.com/hetznercloud/hcloud-go/v2 v2.13.1 + github.com/ionos-cloud/sdk-go/v6 v6.2.1 github.com/json-iterator/go v1.1.12 github.com/klauspost/compress v1.17.9 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b @@ -52,9 +52,9 @@ require ( github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.6.0 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.2 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 + github.com/prometheus/common v0.56.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/exporter-toolkit v0.11.0 @@ -75,15 +75,14 @@ require ( go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 - golang.org/x/net v0.27.0 - golang.org/x/oauth2 v0.21.0 + golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.22.0 golang.org/x/text v0.16.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.23.0 - google.golang.org/api v0.189.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d + google.golang.org/api v0.190.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 @@ -96,7 +95,7 @@ require ( ) require ( - cloud.google.com/go/auth v0.7.2 // indirect + cloud.google.com/go/auth v0.7.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -140,9 +139,9 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/google/s2a-go v0.1.8 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.5 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect @@ -190,8 +189,9 @@ require ( golang.org/x/crypto v0.25.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/term v0.22.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect 
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gotest.tools/v3 v3.0.3 // indirect diff --git a/go.sum b/go.sum index d96710c17..7e763af26 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE= -cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= +cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= +cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -92,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= @@ -149,8 +149,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= -github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -171,8 +171,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= @@ -322,8 +322,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g= github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -332,8 +332,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= -github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -353,8 +353,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= -github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk= +github.com/hashicorp/consul/api v1.29.4 
h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= +github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -414,8 +414,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtx github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hetznercloud/hcloud-go/v2 v2.12.0 h1:nOgfNTo0gyXZJJdM8mo/XH5MO/e80wAEpldRzdWayhY= -github.com/hetznercloud/hcloud-go/v2 v2.12.0/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= +github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= +github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -423,8 +423,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= -github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= +github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -608,8 +608,8 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg= +github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -625,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.56.0 h1:UffReloqkBtvtQEYDg2s+uDPGRrJyC6vZWPGXf6OhPY= +github.com/prometheus/common v0.56.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= @@ -865,8 +865,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1047,8 +1047,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI= -google.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8= +google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= +google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1085,10 +1085,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod 
h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= +google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= diff --git a/model/exemplar/exemplar.go b/model/exemplar/exemplar.go index 08f55374e..2c28b1725 100644 --- a/model/exemplar/exemplar.go +++ b/model/exemplar/exemplar.go @@ -15,7 +15,9 @@ package exemplar import "github.com/prometheus/prometheus/model/labels" -// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters +// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of +// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 +// UTF-8 characters." // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars const ExemplarMaxLabelSetLength = 128 @@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } -// Sort first by timestamp, then value, then labels. +// Compare first timestamps, then values, then labels. func Compare(a, b Exemplar) int { if a.Ts < b.Ts { return -1 diff --git a/model/labels/labels.go b/model/labels/labels.go index cd30f4f8f..f4de7496c 100644 --- a/model/labels/labels.go +++ b/model/labels/labels.go @@ -315,7 +315,8 @@ func Compare(a, b Labels) int { return len(a) - len(b) } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom copies labels from b on top of whatever was in ls previously, +// reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { (*ls) = append((*ls)[:0], b...) } @@ -422,7 +423,7 @@ type ScratchBuilder struct { add Labels } -// Symbol-table is no-op, just for api parity with dedupelabels. +// SymbolTable is no-op, just for api parity with dedupelabels. type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } @@ -458,7 +459,7 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } -// Add a name/value pair, using []byte instead of string. +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. // The '-tags stringlabels' version of this function is unsafe, hence the name. 
 // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles.
 func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
@@ -475,14 +476,14 @@ func (b *ScratchBuilder) Assign(ls Labels) {
 	b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice.
 }
 
-// Return the name/value pairs added so far as a Labels object.
+// Labels returns the name/value pairs added so far as a Labels object.
 // Note: if you want them sorted, call Sort() first.
 func (b *ScratchBuilder) Labels() Labels {
 	// Copy the slice, so the next use of ScratchBuilder doesn't overwrite.
 	return append([]Label{}, b.add...)
 }
 
-// Write the newly-built Labels out to ls.
+// Overwrite writes the newly-built Labels out to ls.
 // Callers must ensure that there are no other references to ls, or any strings fetched from it.
 func (b *ScratchBuilder) Overwrite(ls *Labels) {
 	*ls = append((*ls)[:0], b.add...)
diff --git a/model/labels/labels_common.go b/model/labels/labels_common.go
index 6db86b03c..d7bdc1e07 100644
--- a/model/labels/labels_common.go
+++ b/model/labels/labels_common.go
@@ -95,12 +95,23 @@ func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }
 
 // IsValid checks if the metric name or label names are valid.
-func (ls Labels) IsValid() bool {
+func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool {
 	err := ls.Validate(func(l Label) error {
-		if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) {
-			return strconv.ErrSyntax
+		if l.Name == model.MetricNameLabel {
+			// If the default validation scheme has been overridden with legacy mode,
+			// we need to call the special legacy validation checker.
+			if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) {
+				return strconv.ErrSyntax
+			}
+			if !model.IsValidMetricName(model.LabelValue(l.Value)) {
+				return strconv.ErrSyntax
+			}
 		}
-		if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
+		if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation {
+			if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() {
+				return strconv.ErrSyntax
+			}
+		} else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() {
 			return strconv.ErrSyntax
 		}
 		return nil
diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go
index d8910cdc8..920890831 100644
--- a/model/labels/labels_test.go
+++ b/model/labels/labels_test.go
@@ -21,6 +21,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 )
@@ -272,11 +273,86 @@ func TestLabels_IsValid(t *testing.T) {
 		},
 	} {
 		t.Run("", func(t *testing.T) {
-			require.Equal(t, test.expected, test.input.IsValid())
+			require.Equal(t, test.expected, test.input.IsValid(model.LegacyValidation))
 		})
 	}
 }
 
+func TestLabels_ValidationModes(t *testing.T) {
+	for _, test := range []struct {
+		input      Labels
+		globalMode model.ValidationScheme
+		callMode   model.ValidationScheme
+		expected   bool
+	}{
+		{
+			input: FromStrings(
+				"__name__", "test.metric",
+				"hostname", "localhost",
+				"job", "check",
+			),
+			globalMode: model.UTF8Validation,
+			callMode:   model.UTF8Validation,
+			expected:   true,
+		},
+		{
+			input: FromStrings(
+				"__name__", "test",
+				"\xc5 bad utf8", "localhost",
+				"job", "check",
+			),
+			globalMode: model.UTF8Validation,
+			callMode:   model.UTF8Validation,
+			expected:   false,
+		},
+		{
+			// Setting the common model to legacy validation and then trying to check for UTF-8 on a
+			// per-call basis is not supported.
+			input: FromStrings(
+				"__name__", "test.utf8.metric",
+				"hostname", "localhost",
+				"job", "check",
+			),
+			globalMode: model.LegacyValidation,
+			callMode:   model.UTF8Validation,
+			expected:   false,
+		},
+		{
+			input: FromStrings(
+				"__name__", "test",
+				"hostname", "localhost",
+				"job", "check",
+			),
+			globalMode: model.LegacyValidation,
+			callMode:   model.LegacyValidation,
+			expected:   true,
+		},
+		{
+			input: FromStrings(
+				"__name__", "test.utf8.metric",
+				"hostname", "localhost",
+				"job", "check",
+			),
+			globalMode: model.UTF8Validation,
+			callMode:   model.LegacyValidation,
+			expected:   false,
+		},
+		{
+			input: FromStrings(
+				"__name__", "test",
+				"host.name", "localhost",
+				"job", "check",
+			),
+			globalMode: model.UTF8Validation,
+			callMode:   model.LegacyValidation,
+			expected:   false,
+		},
+	} {
+		model.NameValidationScheme = test.globalMode
+		require.Equal(t, test.expected, test.input.IsValid(test.callMode))
+	}
+}
+
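The test above pins down the interaction between the package-global model.NameValidationScheme and the new per-call scheme argument to IsValid. As a minimal sketch of the same interaction from a caller's point of view (the label values here are illustrative, not taken from the tests; only IsValid and the two scheme constants come from the change above):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Allow UTF-8 metric names globally, as a server configured for UTF-8 would.
	model.NameValidationScheme = model.UTF8Validation

	lset := labels.FromStrings("__name__", "http.requests.total", "job", "check")

	fmt.Println(lset.IsValid(model.UTF8Validation))   // true: dots are fine under UTF-8 rules
	fmt.Println(lset.IsValid(model.LegacyValidation)) // false: legacy names forbid dots
}
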
 func TestLabels_Equal(t *testing.T) {
 	labels := FromStrings(
 		"aaa", "111",
diff --git a/model/textparse/interface.go b/model/textparse/interface.go
index df01dbc34..0b5d9281e 100644
--- a/model/textparse/interface.go
+++ b/model/textparse/interface.go
@@ -106,8 +106,8 @@ const (
 	EntryInvalid   Entry = -1
 	EntryType      Entry = 0
 	EntryHelp      Entry = 1
-	EntrySeries    Entry = 2 // A series with a simple float64 as value.
+	EntrySeries    Entry = 2 // EntrySeries marks a series with a simple float64 as value.
 	EntryComment   Entry = 3
 	EntryUnit      Entry = 4
-	EntryHistogram Entry = 5 // A series with a native histogram as a value.
+	EntryHistogram Entry = 5 // EntryHistogram marks a series with a native histogram as a value.
 )
diff --git a/model/textparse/openmetricsparse.go b/model/textparse/openmetricsparse.go
index b7ad1dd85..5f0415d3e 100644
--- a/model/textparse/openmetricsparse.go
+++ b/model/textparse/openmetricsparse.go
@@ -94,16 +94,46 @@ type OpenMetricsParser struct {
 	exemplarVal   float64
 	exemplarTs    int64
 	hasExemplarTs bool
+
+	skipCTSeries bool
 }
 
-// NewOpenMetricsParser returns a new parser of the byte slice.
-func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser {
-	return &OpenMetricsParser{
-		l:       &openMetricsLexer{b: b},
-		builder: labels.NewScratchBuilderWithSymbolTable(st, 16),
+type openMetricsParserOptions struct {
+	SkipCTSeries bool
+}
+
+type OpenMetricsOption func(*openMetricsParserOptions)
+
+// WithOMParserCTSeriesSkipped turns off exposing _created lines as series,
+// so that they are used only for parsing the created timestamp returned by
+// the `CreatedTimestamp` method.
+//
+// Using this option is recommended, to avoid _created lines being used for
+// anything other than created timestamps, but it is left off by default for
+// best-effort compatibility.
+func WithOMParserCTSeriesSkipped() OpenMetricsOption {
+	return func(o *openMetricsParserOptions) {
+		o.SkipCTSeries = true
 	}
 }
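A minimal sketch of how a consumer might combine this option with CreatedTimestamp, mirroring the parser tests further down in this diff (the input literal is illustrative):

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	input := []byte("# TYPE foo counter\nfoo_total 17.0\nfoo_created 1000\n# EOF\n")
	p := textparse.NewOpenMetricsParser(input, labels.NewSymbolTable(), textparse.WithOMParserCTSeriesSkipped())
	for {
		entry, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			panic(err)
		}
		if entry != textparse.EntrySeries {
			continue
		}
		// With the option enabled, foo_created is not exposed as a series of
		// its own; its value surfaces only through CreatedTimestamp.
		if ct := p.CreatedTimestamp(); ct != nil {
			fmt.Println("created timestamp:", *ct)
		}
	}
}
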
+
+// NewOpenMetricsParser returns a new parser for the byte slice, with an option to skip CT series parsing.
+func NewOpenMetricsParser(b []byte, st *labels.SymbolTable, opts ...OpenMetricsOption) Parser {
+	options := &openMetricsParserOptions{}
+
+	for _, opt := range opts {
+		opt(options)
+	}
+
+	parser := &OpenMetricsParser{
+		l:            &openMetricsLexer{b: b},
+		builder:      labels.NewScratchBuilderWithSymbolTable(st, 16),
+		skipCTSeries: options.SkipCTSeries,
+	}
+
+	return parser
+}
+
 // Series returns the bytes of the series, the timestamp if set, and the value
 // of the current sample.
 func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
@@ -219,10 +249,90 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
 	return true
 }
 
-// CreatedTimestamp returns nil as it's not implemented yet.
-// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980
+// CreatedTimestamp returns the created timestamp for the current Metric if one
+// exists, or nil otherwise.
+// NOTE(Maniktherana): This might use additional CPU/memory resources, since the
+// OM 1.0 specification of _created series requires a deep copy of the parser
+// for peeking ahead.
 func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
-	return nil
+	if !TypeRequiresCT(p.mtype) {
+		// Not a CT-supported metric type, fast path.
+		return nil
+	}
+
+	var (
+		currLset                labels.Labels
+		buf                     []byte
+		peekWithoutNameLsetHash uint64
+	)
+	p.Metric(&currLset)
+	currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+	// Search for the _created line for the currFamilyLsetHash using an ephemeral parser until
+	// we see EOF or a new metric family. We have to do this because we don't know where (or
+	// whether) that CT line is.
+	// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+	peek := deepCopy(p)
+	for {
+		eType, err := peek.Next()
+		if err != nil {
+			// This means peek will give an error later on as well, so there is definitely
+			// no CT line to be found. This might result in a partial scrape with a
+			// wrong/missing CT, but only a spec improvement would help.
+			// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+			return nil
+		}
+		if eType != EntrySeries {
+			// Assume we hit a different metric family; no CT line found.
+			return nil
+		}
+
+		var peekedLset labels.Labels
+		peek.Metric(&peekedLset)
+		peekedName := peekedLset.Get(model.MetricNameLabel)
+		if !strings.HasSuffix(peekedName, "_created") {
+			// Not a CT line, keep searching.
+			continue
+		}
+
+		// We got a CT line here, but let's check whether it is actually for our series; edge case.
+		peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
+		if peekWithoutNameLsetHash != currFamilyLsetHash {
+			// CT line for a different series, so no CT for our series.
+			return nil
+		}
+		ct := int64(peek.val)
+		return &ct
+	}
+}
+
+// TypeRequiresCT returns true if the metric type requires a _created timestamp.
+func TypeRequiresCT(t model.MetricType) bool {
+	switch t {
+	case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram:
+		return true
+	default:
+		return false
+	}
+}
+
+// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
+func deepCopy(p *OpenMetricsParser) OpenMetricsParser { + newB := make([]byte, len(p.l.b)) + copy(newB, p.l.b) + + newLexer := &openMetricsLexer{ + b: newB, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + newParser := OpenMetricsParser{ + l: newLexer, + builder: p.builder, + mtype: p.mtype, + val: p.val, + skipCTSeries: false, + } + return newParser } // nextToken returns the next token from the openMetricsLexer. @@ -337,7 +447,13 @@ func (p *OpenMetricsParser) Next() (Entry, error) { } p.series = p.l.b[p.start:p.l.i] - return p.parseMetricSuffix(p.nextToken()) + if err := p.parseSeriesEndOfLine(p.nextToken()); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil case tMName: p.offsets = append(p.offsets, p.start, p.l.i) p.series = p.l.b[p.start:p.l.i] @@ -351,8 +467,14 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.series = p.l.b[p.start:p.l.i] t2 = p.nextToken() } - return p.parseMetricSuffix(t2) + if err := p.parseSeriesEndOfLine(t2); err != nil { + return EntryInvalid, err + } + if p.skipCTSeries && p.isCreatedSeries() { + return p.Next() + } + return EntrySeries, nil default: err = p.parseError("expected a valid start token", t) } @@ -467,51 +589,64 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e } } -// parseMetricSuffix parses the end of the line after the metric name and -// labels. It starts parsing with the provided token. -func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) { +// isCreatedSeries returns true if the current series is a _created series. +func (p *OpenMetricsParser) isCreatedSeries() bool { + var newLbs labels.Labels + p.Metric(&newLbs) + name := newLbs.Get(model.MetricNameLabel) + if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + return true + } + return false +} + +// parseSeriesEndOfLine parses the series end of the line (value, optional +// timestamp, commentary, etc.) after the metric name and labels. +// It starts parsing with the provided token. +func (p *OpenMetricsParser) parseSeriesEndOfLine(t token) error { if p.offsets[0] == -1 { - return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + return fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) } var err error p.val, err = p.getFloatValue(t, "metric") if err != nil { - return EntryInvalid, err + return err } p.hasTS = false switch t2 := p.nextToken(); t2 { case tEOF: - return EntryInvalid, errors.New("data does not end with # EOF") + return errors.New("data does not end with # EOF") case tLinebreak: break case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } case tTimestamp: p.hasTS = true var ts float64 // A float is enough to hold what we need for millisecond resolution. 
if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { - return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) } if math.IsNaN(ts) || math.IsInf(ts, 0) { - return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts) + return fmt.Errorf("invalid timestamp %f", ts) } p.ts = int64(ts * 1000) switch t3 := p.nextToken(); t3 { case tLinebreak: case tComment: if err := p.parseComment(); err != nil { - return EntryInvalid, err + return err } default: - return EntryInvalid, p.parseError("expected next entry after timestamp", t3) + return p.parseError("expected next entry after timestamp", t3) } } - return EntrySeries, nil + + return nil } func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) { diff --git a/model/textparse/openmetricsparse_test.go b/model/textparse/openmetricsparse_test.go index bc76a540d..cadaabc99 100644 --- a/model/textparse/openmetricsparse_test.go +++ b/model/textparse/openmetricsparse_test.go @@ -14,6 +14,7 @@ package textparse import ( + "errors" "io" "testing" @@ -24,6 +25,8 @@ import ( "github.com/prometheus/prometheus/model/labels" ) +func int64p(x int64) *int64 { return &x } + func TestOpenMetricsParse(t *testing.T) { input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary @@ -63,15 +66,34 @@ ss{A="a"} 0 _metric_starting_with_underscore 1 testmetric{_label_starting_with_underscore="foo"} 1 testmetric{label="\"bar\""} 1 +# HELP foo Counter with and without labels to certify CT is parsed for both cases # TYPE foo counter -foo_total 17.0 1520879607.789 # {id="counter-test"} 5` +foo_total 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created 1000 +foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 +foo_created{a="b"} 1000 +# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines a far +# TYPE bar summary +bar_count 17.0 +bar_sum 324789.3 +bar{quantile="0.95"} 123.7 +bar{quantile="0.99"} 150.0 +bar_created 1520430000 +# HELP baz Histogram with the same objective as above's summary +# TYPE baz histogram +baz_bucket{le="0.0"} 0 +baz_bucket{le="+Inf"} 17 +baz_count 17 +baz_sum 324789.3 +baz_created 1520430000 +# HELP fizz_created Gauge which shouldn't be parsed as CT +# TYPE fizz_created gauge +fizz_created 17.0` input += "\n# HELP metric foo\x00bar" input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\n# EOF\n" - int64p := func(x int64) *int64 { return &x } - exp := []expectedParse{ { m: "go_gc_duration_seconds", @@ -216,6 +238,9 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` m: "testmetric{label=\"\\\"bar\\\"\"}", v: 1, lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`), + }, { + m: "foo", + help: "Counter with and without labels to certify CT is parsed for both cases", }, { m: "foo", typ: model.MetricTypeCounter, @@ -225,6 +250,76 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` lset: labels.FromStrings("__name__", "foo_total"), t: int64p(1520879607789), e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1000), + }, { + m: `foo_total{a="b"}`, + v: 17.0, + lset: labels.FromStrings("__name__", "foo_total", "a", "b"), + t: int64p(1520879607789), + e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, + ct: int64p(1000), + }, { + m: "bar", + help: "Summary with CT at the end, making sure we find CT even if it's multiple 
lines a far", + }, { + m: "bar", + typ: model.MetricTypeSummary, + }, { + m: "bar_count", + v: 17.0, + lset: labels.FromStrings("__name__", "bar_count"), + ct: int64p(1520430000), + }, { + m: "bar_sum", + v: 324789.3, + lset: labels.FromStrings("__name__", "bar_sum"), + ct: int64p(1520430000), + }, { + m: `bar{quantile="0.95"}`, + v: 123.7, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), + ct: int64p(1520430000), + }, { + m: `bar{quantile="0.99"}`, + v: 150.0, + lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), + ct: int64p(1520430000), + }, { + m: "baz", + help: "Histogram with the same objective as above's summary", + }, { + m: "baz", + typ: model.MetricTypeHistogram, + }, { + m: `baz_bucket{le="0.0"}`, + v: 0, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), + ct: int64p(1520430000), + }, { + m: `baz_bucket{le="+Inf"}`, + v: 17, + lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), + ct: int64p(1520430000), + }, { + m: `baz_count`, + v: 17, + lset: labels.FromStrings("__name__", "baz_count"), + ct: int64p(1520430000), + }, { + m: `baz_sum`, + v: 324789.3, + lset: labels.FromStrings("__name__", "baz_sum"), + ct: int64p(1520430000), + }, { + m: "fizz_created", + help: "Gauge which shouldn't be parsed as CT", + }, { + m: "fizz_created", + typ: model.MetricTypeGauge, + }, { + m: `fizz_created`, + v: 17, + lset: labels.FromStrings("__name__", "fizz_created"), }, { m: "metric", help: "foo\x00bar", @@ -235,8 +330,8 @@ foo_total 17.0 1520879607.789 # {id="counter-test"} 5` }, } - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + checkParseResultsWithCT(t, p, exp, true) } func TestUTF8OpenMetricsParse(t *testing.T) { @@ -251,6 +346,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) { # UNIT "go.gc_duration_seconds" seconds {"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 +{"go.gc_duration_seconds_created"} 12313 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05 @@ -274,10 +370,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) { m: `{"go.gc_duration_seconds",quantile="0"}`, v: 4.9351e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), + ct: int64p(12313), }, { m: `{"go.gc_duration_seconds",quantile="0.25"}`, v: 7.424100000000001e-05, lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), + ct: int64p(12313), }, { m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, v: 8.3835e-05, @@ -306,8 +404,8 @@ choices}`, "strange©™\n'quoted' \"name\"", "6"), }, } - p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable()) - checkParseResults(t, p, exp) + p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped()) + checkParseResultsWithCT(t, p, exp, true) } func TestOpenMetricsParseErrors(t *testing.T) { @@ -598,10 +696,6 @@ func TestOpenMetricsParseErrors(t *testing.T) { input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf", err: `invalid exemplar timestamp -Inf`, }, - { - input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf", - err: `invalid exemplar timestamp +Inf`, - }, } for i, c := range cases { @@ -684,3 +778,217 @@ func TestOMNullByteHandling(t *testing.T) { require.Equal(t, c.err, 
err.Error(), "test %d", i)
 	}
 }
+
+// While not desirable, there are cases where CT fails to parse and
+// these tests show them.
+// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
+func TestCTParseFailures(t *testing.T) {
+	input := `# HELP something Histogram with _created between buckets and summary
+# TYPE something histogram
+something_count 17
+something_sum 324789.3
+something_created 1520430001
+something_bucket{le="0.0"} 0
+something_bucket{le="+Inf"} 17
+# HELP thing Histogram with _created as first line
+# TYPE thing histogram
+thing_created 1520430002
+thing_count 17
+thing_sum 324789.3
+thing_bucket{le="0.0"} 0
+thing_bucket{le="+Inf"} 17
+# HELP yum Summary with _created between sum and quantiles
+# TYPE yum summary
+yum_count 17.0
+yum_sum 324789.3
+yum_created 1520430003
+yum{quantile="0.95"} 123.7
+yum{quantile="0.99"} 150.0
+# HELP foobar Summary with _created as the first line
+# TYPE foobar summary
+foobar_created 1520430004
+foobar_count 17.0
+foobar_sum 324789.3
+foobar{quantile="0.95"} 123.7
+foobar{quantile="0.99"} 150.0`
+
+	input += "\n# EOF\n"
+
+	int64p := func(x int64) *int64 { return &x }
+
+	type expectCT struct {
+		m     string
+		ct    *int64
+		typ   model.MetricType
+		help  string
+		isErr bool
+	}
+
+	exp := []expectCT{
+		{
+			m:     "something",
+			help:  "Histogram with _created between buckets and summary",
+			isErr: false,
+		}, {
+			m:     "something",
+			typ:   model.MetricTypeHistogram,
+			isErr: false,
+		}, {
+			m:     `something_count`,
+			ct:    int64p(1520430001),
+			isErr: false,
+		}, {
+			m:     `something_sum`,
+			ct:    int64p(1520430001),
+			isErr: false,
+		}, {
+			m:     `something_bucket{le="0.0"}`,
+			ct:    int64p(1520430001),
+			isErr: true,
+		}, {
+			m:     `something_bucket{le="+Inf"}`,
+			ct:    int64p(1520430001),
+			isErr: true,
+		}, {
+			m:     "thing",
+			help:  "Histogram with _created as first line",
+			isErr: false,
+		}, {
+			m:     "thing",
+			typ:   model.MetricTypeHistogram,
+			isErr: false,
+		}, {
+			m:     `thing_count`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     `thing_sum`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     `thing_bucket{le="0.0"}`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     `thing_bucket{le="+Inf"}`,
+			ct:    int64p(1520430002),
+			isErr: true,
+		}, {
+			m:     "yum",
+			help:  "Summary with _created between sum and quantiles",
+			isErr: false,
+		}, {
+			m:     "yum",
+			typ:   model.MetricTypeSummary,
+			isErr: false,
+		}, {
+			m:     "yum_count",
+			ct:    int64p(1520430003),
+			isErr: false,
+		}, {
+			m:     "yum_sum",
+			ct:    int64p(1520430003),
+			isErr: false,
+		}, {
+			m:     `yum{quantile="0.95"}`,
+			ct:    int64p(1520430003),
+			isErr: true,
+		}, {
+			m:     `yum{quantile="0.99"}`,
+			ct:    int64p(1520430003),
+			isErr: true,
+		}, {
+			m:     "foobar",
+			help:  "Summary with _created as the first line",
+			isErr: false,
+		}, {
+			m:     "foobar",
+			typ:   model.MetricTypeSummary,
+			isErr: false,
+		}, {
+			m:     "foobar_count",
+			ct:    int64p(1520430004),
+			isErr: true,
+		}, {
+			m:     "foobar_sum",
+			ct:    int64p(1520430004),
+			isErr: true,
+		}, {
+			m:     `foobar{quantile="0.95"}`,
+			ct:    int64p(1520430004),
+			isErr: true,
+		}, {
+			m:     `foobar{quantile="0.99"}`,
+			ct:    int64p(1520430004),
+			isErr: true,
+		},
+	}
+
+	p := NewOpenMetricsParser([]byte(input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
+	i := 0
+
+	var res labels.Labels
+	for {
+		et, err := p.Next()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		require.NoError(t, err)
+
+		switch et {
+		case EntrySeries:
+			p.Metric(&res)
+
+			if ct := p.CreatedTimestamp(); exp[i].isErr {
+				require.Nil(t, ct)
+			} else {
+				require.Equal(t, 
*exp[i].ct, *ct) + } + default: + i++ + continue + } + i++ + } +} + +func TestDeepCopy(t *testing.T) { + input := []byte(`# HELP go_goroutines A gauge goroutines. +# TYPE go_goroutines gauge +go_goroutines 33 123.123 +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds +go_gc_duration_seconds_created`) + + st := labels.NewSymbolTable() + parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser) + + // Modify the original parser state + _, err := parser.Next() + require.NoError(t, err) + require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) + require.True(t, parser.skipCTSeries) + + // Create a deep copy of the parser + copyParser := deepCopy(parser) + etype, err := copyParser.Next() + require.NoError(t, err) + require.Equal(t, EntryType, etype) + require.True(t, parser.skipCTSeries) + require.False(t, copyParser.skipCTSeries) + + // Modify the original parser further + parser.Next() + parser.Next() + parser.Next() + require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]])) + require.Equal(t, "summary", string(parser.mtype)) + require.False(t, copyParser.skipCTSeries) + require.True(t, parser.skipCTSeries) + + // Ensure the copy remains unchanged + copyParser.Next() + copyParser.Next() + require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]])) + require.False(t, copyParser.skipCTSeries) +} diff --git a/model/textparse/promparse_test.go b/model/textparse/promparse_test.go index 66986291d..7971d23b7 100644 --- a/model/textparse/promparse_test.go +++ b/model/textparse/promparse_test.go @@ -18,6 +18,7 @@ import ( "errors" "io" "os" + "strings" "testing" "github.com/klauspost/compress/gzip" @@ -41,6 +42,7 @@ type expectedParse struct { unit string comment string e *exemplar.Exemplar + ct *int64 } func TestPromParse(t *testing.T) { @@ -188,6 +190,10 @@ testmetric{label="\"bar\""} 1` } func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { + checkParseResultsWithCT(t, p, exp, false) +} + +func checkParseResultsWithCT(t *testing.T, p Parser, exp []expectedParse, ctLinesRemoved bool) { i := 0 var res labels.Labels @@ -205,6 +211,14 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) { p.Metric(&res) + if ctLinesRemoved { + // Are CT series skipped? 
+			_, typ := p.Type()
+			if TypeRequiresCT(typ) && strings.HasSuffix(res.Get(labels.MetricName), "_created") {
+				t.Fatalf("expected created lines to be skipped")
+			}
+		}
+
 		require.Equal(t, exp[i].m, string(m))
 		require.Equal(t, exp[i].t, ts)
 		require.Equal(t, exp[i].v, v)
@@ -218,6 +232,11 @@ func checkParseResults(t *testing.T, p Parser, exp []expectedParse) {
 			require.True(t, found)
 			testutil.RequireEqual(t, *exp[i].e, e)
 		}
+		if ct := p.CreatedTimestamp(); ct != nil {
+			require.Equal(t, *exp[i].ct, *ct)
+		} else {
+			require.Nil(t, exp[i].ct)
+		}
 
 	case EntryType:
 		m, typ := p.Type()
@@ -475,8 +494,10 @@ const (
 
 func BenchmarkParse(b *testing.B) {
 	for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
-		"prometheus":  NewPromParser,
-		"openmetrics": NewOpenMetricsParser,
+		"prometheus": NewPromParser,
+		"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
+			return NewOpenMetricsParser(b, st)
+		},
 	} {
 		for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
 			f, err := os.Open(fn)
diff --git a/model/textparse/protobufparse.go b/model/textparse/protobufparse.go
index ea3a2e1a3..e384a75fc 100644
--- a/model/textparse/protobufparse.go
+++ b/model/textparse/protobufparse.go
@@ -47,7 +47,7 @@ import (
 // the re-arrangement work is actually causing problems (which has to be seen),
 // that expectation needs to be changed.
 type ProtobufParser struct {
-	in        []byte // The intput to parse.
+	in        []byte // The input to parse.
 	inPos     int    // Position within the input.
 	metricPos int    // Position within Metric slice.
 	// fieldPos is the position within a Summary or (legacy) Histogram. -2
@@ -71,7 +71,7 @@ type ProtobufParser struct {
 
 	mf *dto.MetricFamily
 
-	// Wether to also parse a classic histogram that is also present as a
+	// Whether to also parse a classic histogram that is also present as a
 	// native histogram.
 	parseClassicHistograms bool
 
@@ -409,6 +409,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
 	switch p.state {
 	case EntryInvalid:
 		p.metricPos = 0
+		p.exemplarPos = 0
 		p.fieldPos = -2
 		n, err := readDelimited(p.in[p.inPos:], p.mf)
 		p.inPos += n
@@ -485,6 +486,7 @@ func (p *ProtobufParser) Next() (Entry, error) {
 			p.metricPos++
 			p.fieldPos = -2
 			p.fieldsDone = false
+			p.exemplarPos = 0
 			// If this is a metric family containing native
 			// histograms, we have to switch back to native
 			// histograms after parsing a classic histogram.
diff --git a/model/textparse/protobufparse_test.go b/model/textparse/protobufparse_test.go
index e323a6cc8..cf34ae52d 100644
--- a/model/textparse/protobufparse_test.go
+++ b/model/textparse/protobufparse_test.go
@@ -695,6 +695,70 @@ metric: <
   timestamp_ms: 1234568
 >
 
+`,
+
+	`name: "test_histogram_with_native_histogram_exemplars2"
+help: "Another histogram with native histogram exemplars."
+type: HISTOGRAM +metric: < + histogram: < + sample_count: 175 + sample_sum: 0.0008280461746287094 + bucket: < + cumulative_count: 2 + upper_bound: -0.0004899999999999998 + > + bucket: < + cumulative_count: 4 + upper_bound: -0.0003899999999999998 + > + bucket: < + cumulative_count: 16 + upper_bound: -0.0002899999999999998 + > + schema: 3 + zero_threshold: 2.938735877055719e-39 + zero_count: 2 + negative_span: < + offset: -162 + length: 1 + > + negative_span: < + offset: 23 + length: 4 + > + negative_delta: 1 + negative_delta: 3 + negative_delta: -2 + negative_delta: -1 + negative_delta: 1 + positive_span: < + offset: -161 + length: 1 + > + positive_span: < + offset: 8 + length: 3 + > + positive_delta: 1 + positive_delta: 2 + positive_delta: -1 + positive_delta: -1 + exemplars: < + label: < + name: "dummyID" + value: "59780" + > + value: -0.00039 + timestamp: < + seconds: 1625851155 + nanos: 146848499 + > + > + > + timestamp_ms: 1234568 +> + `, } @@ -1276,6 +1340,41 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, + { + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: -161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, }, }, { @@ -1995,15 +2094,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "without_quantiles_sum", ), }, - { // 78 + { // 81 m: "empty_histogram", help: "A histogram without observations and with a zero threshold of zero but with a no-op span to identify it as a native histogram.", }, - { // 79 + { // 82 m: "empty_histogram", typ: model.MetricTypeHistogram, }, - { // 80 + { // 83 m: "empty_histogram", shs: &histogram.Histogram{ CounterResetHint: histogram.UnknownCounterReset, @@ -2014,15 +2113,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "empty_histogram", ), }, - { // 81 + { // 84 m: "test_counter_with_createdtimestamp", help: "A counter with a created timestamp.", }, - { // 82 + { // 85 m: "test_counter_with_createdtimestamp", typ: model.MetricTypeCounter, }, - { // 83 + { // 86 m: "test_counter_with_createdtimestamp", v: 42, ct: 1000, @@ -2030,15 +2129,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_counter_with_createdtimestamp", ), }, - { // 84 + { // 87 m: "test_summary_with_createdtimestamp", help: "A summary with a created timestamp.", }, - { // 85 + { // 88 m: "test_summary_with_createdtimestamp", typ: model.MetricTypeSummary, }, - { // 86 + { // 89 m: "test_summary_with_createdtimestamp_count", v: 42, ct: 1000, @@ -2046,7 +2145,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_summary_with_createdtimestamp_count", ), }, - { // 87 + { // 90 m: "test_summary_with_createdtimestamp_sum", v: 
1.234, ct: 1000, @@ -2054,15 +2153,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_summary_with_createdtimestamp_sum", ), }, - { // 88 + { // 91 m: "test_histogram_with_createdtimestamp", help: "A histogram with a created timestamp.", }, - { // 89 + { // 92 m: "test_histogram_with_createdtimestamp", typ: model.MetricTypeHistogram, }, - { // 90 + { // 93 m: "test_histogram_with_createdtimestamp", ct: 1000, shs: &histogram.Histogram{ @@ -2074,15 +2173,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_createdtimestamp", ), }, - { // 91 + { // 94 m: "test_gaugehistogram_with_createdtimestamp", help: "A gauge histogram with a created timestamp.", }, - { // 92 + { // 95 m: "test_gaugehistogram_with_createdtimestamp", typ: model.MetricTypeGaugeHistogram, }, - { // 93 + { // 96 m: "test_gaugehistogram_with_createdtimestamp", ct: 1000, shs: &histogram.Histogram{ @@ -2094,15 +2193,15 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_gaugehistogram_with_createdtimestamp", ), }, - { // 94 + { // 97 m: "test_histogram_with_native_histogram_exemplars", help: "A histogram with native histogram exemplars.", }, - { // 95 + { // 98 m: "test_histogram_with_native_histogram_exemplars", typ: model.MetricTypeHistogram, }, - { // 96 + { // 99 m: "test_histogram_with_native_histogram_exemplars", t: 1234568, shs: &histogram.Histogram{ @@ -2130,7 +2229,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59772"), Value: -0.00052, HasTs: true, Ts: 1625851160156}, }, }, - { // 97 + { // 100 m: "test_histogram_with_native_histogram_exemplars_count", t: 1234568, v: 175, @@ -2138,7 +2237,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_native_histogram_exemplars_count", ), }, - { // 98 + { // 101 m: "test_histogram_with_native_histogram_exemplars_sum", t: 1234568, v: 0.0008280461746287094, @@ -2146,7 +2245,7 @@ func TestProtobufParse(t *testing.T) { "__name__", "test_histogram_with_native_histogram_exemplars_sum", ), }, - { // 99 + { // 102 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0004899999999999998", t: 1234568, v: 2, @@ -2155,7 +2254,7 @@ func TestProtobufParse(t *testing.T) { "le", "-0.0004899999999999998", ), }, - { // 100 + { // 103 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0003899999999999998", t: 1234568, v: 4, @@ -2167,7 +2266,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "59727"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, }, }, - { // 101 + { // 104 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff-0.0002899999999999998", t: 1234568, v: 16, @@ -2179,7 +2278,7 @@ func TestProtobufParse(t *testing.T) { {Labels: labels.FromStrings("dummyID", "5617"), Value: -0.00029, HasTs: false}, }, }, - { // 102 + { // 105 m: "test_histogram_with_native_histogram_exemplars_bucket\xffle\xff+Inf", t: 1234568, v: 175, @@ -2188,6 +2287,93 @@ func TestProtobufParse(t *testing.T) { "le", "+Inf", ), }, + { // 106 + m: "test_histogram_with_native_histogram_exemplars2", + help: "Another histogram with native histogram exemplars.", + }, + { // 107 + m: "test_histogram_with_native_histogram_exemplars2", + typ: model.MetricTypeHistogram, + }, + { // 108 + m: "test_histogram_with_native_histogram_exemplars2", + t: 1234568, + shs: &histogram.Histogram{ + Count: 175, + ZeroCount: 2, + Sum: 0.0008280461746287094, + ZeroThreshold: 2.938735877055719e-39, + Schema: 3, + PositiveSpans: []histogram.Span{ + {Offset: 
-161, Length: 1}, + {Offset: 8, Length: 3}, + }, + NegativeSpans: []histogram.Span{ + {Offset: -162, Length: 1}, + {Offset: 23, Length: 4}, + }, + PositiveBuckets: []int64{1, 2, -1, -1}, + NegativeBuckets: []int64{1, 3, -2, -1, 1}, + }, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2", + ), + e: []exemplar.Exemplar{ + {Labels: labels.FromStrings("dummyID", "59780"), Value: -0.00039, HasTs: true, Ts: 1625851155146}, + }, + }, + { // 109 + m: "test_histogram_with_native_histogram_exemplars2_count", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_count", + ), + }, + { // 110 + m: "test_histogram_with_native_histogram_exemplars2_sum", + t: 1234568, + v: 0.0008280461746287094, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_sum", + ), + }, + { // 111 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0004899999999999998", + t: 1234568, + v: 2, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0004899999999999998", + ), + }, + { // 112 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0003899999999999998", + t: 1234568, + v: 4, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0003899999999999998", + ), + }, + { // 113 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff-0.0002899999999999998", + t: 1234568, + v: 16, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "-0.0002899999999999998", + ), + }, + { // 114 + m: "test_histogram_with_native_histogram_exemplars2_bucket\xffle\xff+Inf", + t: 1234568, + v: 175, + lset: labels.FromStrings( + "__name__", "test_histogram_with_native_histogram_exemplars2_bucket", + "le", "+Inf", + ), + }, }, }, } diff --git a/notifier/notifier.go b/notifier/notifier.go index 68b0d4961..218e4cb8c 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -674,7 +674,6 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b }() // Any HTTP status 2xx is OK. - //nolint:usestdlibvars if resp.StatusCode/100 != 2 { return fmt.Errorf("bad response status %s", resp.Status) } diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index 2cdaa9e06..cf922a537 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -711,7 +711,7 @@ func TestHangingNotifier(t *testing.T) { ) var ( - sendTimeout = 10 * time.Millisecond + sendTimeout = 100 * time.Millisecond sdUpdatert = sendTimeout / 2 done = make(chan struct{}) diff --git a/prompb/io/prometheus/write/v2/types.pb.go b/prompb/io/prometheus/write/v2/types.pb.go index d6ea8398f..3420d20e2 100644 --- a/prompb/io/prometheus/write/v2/types.pb.go +++ b/prompb/io/prometheus/write/v2/types.pb.go @@ -302,15 +302,10 @@ type Exemplar struct { // value represents an exact example value. This can be useful when the exemplar // is attached to a histogram, which only gives an estimated value through buckets. Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - // timestamp represents an optional timestamp of the sample in ms. + // timestamp represents the timestamp of the exemplar in ms. // // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go // for conversion from/to time.Time to Prometheus timestamp. 
- // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` diff --git a/prompb/io/prometheus/write/v2/types.proto b/prompb/io/prometheus/write/v2/types.proto index 0cc7b8bc4..ff6c4936b 100644 --- a/prompb/io/prometheus/write/v2/types.proto +++ b/prompb/io/prometheus/write/v2/types.proto @@ -107,15 +107,10 @@ message Exemplar { // value represents an exact example value. This can be useful when the exemplar // is attached to a histogram, which only gives an estimated value through buckets. double value = 2; - // timestamp represents an optional timestamp of the sample in ms. + // timestamp represents the timestamp of the exemplar in ms. // // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. int64 timestamp = 3; } diff --git a/promql/bench_test.go b/promql/bench_test.go index bd6728029..74e85b054 100644 --- a/promql/bench_test.go +++ b/promql/bench_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/tsdbutil" "github.com/prometheus/prometheus/util/teststorage" @@ -165,6 +166,9 @@ func rangeQueryCases() []benchCase { { expr: "sum(a_X)", }, + { + expr: "avg(a_X)", + }, { expr: "sum without (l)(h_X)", }, @@ -271,7 +275,7 @@ func BenchmarkRangeQuery(b *testing.B) { MaxSamples: 50000000, Timeout: 100 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(b, opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. @@ -362,7 +366,7 @@ func BenchmarkNativeHistograms(b *testing.B) { for _, tc := range cases { b.Run(tc.name, func(b *testing.B) { - ng := promql.NewEngine(opts) + ng := promqltest.NewTestEngineWithOpts(b, opts) for i := 0; i < b.N; i++ { qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step) if err != nil { diff --git a/promql/engine.go b/promql/engine.go index 25e67db63..dd855c6d2 100644 --- a/promql/engine.go +++ b/promql/engine.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "io" "math" "reflect" "runtime" @@ -271,6 +272,8 @@ func contextErr(err error, env string) error { // // 2) Enforcement of the maximum number of concurrent queries. type QueryTracker interface { + io.Closer + // GetMaxConcurrent returns maximum number of concurrent queries that are allowed by this tracker. GetMaxConcurrent() int @@ -313,6 +316,11 @@ type EngineOpts struct { // EnablePerStepStats if true allows for per-step stats to be computed on request. Disabled otherwise. 
EnablePerStepStats bool + + // EnableDelayedNameRemoval delays the removal of the __name__ label to the last step of the query evaluation. + // This is useful in certain scenarios where the __name__ label must be preserved or where applying a + // regex-matcher to the __name__ label may otherwise lead to duplicate labelset errors. + EnableDelayedNameRemoval bool } // Engine handles the lifetime of queries from beginning to end. @@ -330,6 +338,7 @@ type Engine struct { enableAtModifier bool enableNegativeOffset bool enablePerStepStats bool + enableDelayedNameRemoval bool } // NewEngine returns a new engine. @@ -420,9 +429,18 @@ func NewEngine(opts EngineOpts) *Engine { enableAtModifier: opts.EnableAtModifier, enableNegativeOffset: opts.EnableNegativeOffset, enablePerStepStats: opts.EnablePerStepStats, + enableDelayedNameRemoval: opts.EnableDelayedNameRemoval, } } +// Close closes ng. +func (ng *Engine) Close() error { + if ng.activeQueryTracker != nil { + return ng.activeQueryTracker.Close() + } + return nil +} + // SetQueryLogger sets the query logger. func (ng *Engine) SetQueryLogger(l QueryLogger) { ng.queryLoggerLock.Lock() @@ -573,7 +591,7 @@ func (ng *Engine) validateOpts(expr parser.Expr) error { return validationErr } -// NewTestQuery: inject special behaviour into Query for testing. +// NewTestQuery injects special behaviour into Query for testing. func (ng *Engine) NewTestQuery(f func(context.Context) error) Query { qry := &query{ q: "test statement", @@ -706,16 +724,16 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval startTimestamp: start, endTimestamp: start, interval: 1, - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ng.enableDelayedNameRemoval, } query.sampleStats.InitStepTracking(start, start, 1) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -743,9 +761,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval // Point might have a different timestamp, force it to the evaluation // timestamp as that is when we ran the evaluation. 
if len(s.Histograms) > 0 { - vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start} + vector[i] = Sample{Metric: s.Metric, H: s.Histograms[0].H, T: start, DropName: s.DropName} } else { - vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start} + vector[i] = Sample{Metric: s.Metric, F: s.Floats[0].F, T: start, DropName: s.DropName} } } return vector, warnings, nil @@ -764,15 +782,15 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval startTimestamp: timeMilliseconds(s.Start), endTimestamp: timeMilliseconds(s.End), interval: durationMilliseconds(s.Interval), - ctx: ctxInnerEval, maxSamples: ng.maxSamplesPerQuery, logger: ng.logger, lookbackDelta: s.LookbackDelta, samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ng.enableDelayedNameRemoval, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) - val, warnings, err := evaluator.Eval(s.Expr) + val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) evalSpanTimer.Finish() @@ -984,6 +1002,8 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations if e.Series != nil { return nil, nil } + span := trace.SpanFromContext(ctx) + span.AddEvent("expand start", trace.WithAttributes(attribute.String("selector", e.String()))) series, ws, err := expandSeriesSet(ctx, e.UnexpandedSeriesSet) if e.SkipHistogramBuckets { for i := range series { @@ -991,6 +1011,7 @@ func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations } } e.Series = series + span.AddEvent("expand end", trace.WithAttributes(attribute.Int("num_series", len(series)))) return ws, err } return nil, nil @@ -1020,8 +1041,6 @@ func (e errWithWarnings) Error() string { return e.err.Error() } // querier and reports errors. On timeout or cancellation of its context it // terminates. type evaluator struct { - ctx context.Context - startTimestamp int64 // Start time in milliseconds. endTimestamp int64 // End time in milliseconds. interval int64 // Interval in milliseconds. @@ -1032,6 +1051,7 @@ type evaluator struct { lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 + enableDelayedNameRemoval bool } // errorf causes a panic with the input formatted into an error. 
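Taken together, the new EngineOpts field and the Engine.Close method above change how callers set up and tear down an engine. A minimal usage sketch, assuming only the APIs added in this diff (storage wiring and query execution elided):

package main

import (
	"time"

	"github.com/prometheus/prometheus/promql"
)

func main() {
	opts := promql.EngineOpts{
		MaxSamples: 50000000,
		Timeout:    2 * time.Minute,
		// New option: keep __name__ until the final step of evaluation.
		EnableDelayedNameRemoval: true,
	}
	engine := promql.NewEngine(opts)
	// Also new: Close delegates to the active-query tracker, which now
	// has to implement io.Closer via the QueryTracker interface.
	defer engine.Close()
}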
@@ -1057,7 +1077,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic in parser", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -1069,10 +1089,13 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp } } -func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { +func (ev *evaluator) Eval(ctx context.Context, expr parser.Expr) (v parser.Value, ws annotations.Annotations, err error) { defer ev.recover(expr, &ws, &err) - v, ws = ev.eval(expr) + v, ws = ev.eval(ctx, expr) + if ev.enableDelayedNameRemoval { + ev.cleanupMetricLabels(v) + } return v, ws, nil } @@ -1101,6 +1124,9 @@ type EvalNodeHelper struct { rightSigs map[string]Sample matchedSigs map[string]map[uint64]struct{} resultMetric map[string]labels.Labels + + // Additional options for the evaluation. + enableDelayedNameRemoval bool } func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { @@ -1117,7 +1143,7 @@ func (enh *EvalNodeHelper) resetBuilder(lbls labels.Labels) { // function call results. // The prepSeries function (if provided) can be used to prepare the helper // for each series, then passed to each call funcCall. -func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Labels, *EvalSeriesHelper), funcCall func([]parser.Value, [][]EvalSeriesHelper, *EvalNodeHelper) (Vector, annotations.Annotations), exprs ...parser.Expr) (Matrix, annotations.Annotations) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -1128,7 +1154,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // Functions will take string arguments from the expressions, not the values. if e != nil && e.Type() != parser.ValueTypeString { // ev.currentSamples will be updated to the correct value within the ev.eval call. - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) warnings.Merge(ws) matrixes[i] = val.(Matrix) @@ -1150,7 +1176,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) biggestLen = len(matrixes[i]) } } - enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen)} + enh := &EvalNodeHelper{Out: make(Vector, 0, biggestLen), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} type seriesAndTimestamp struct { Series ts int64 @@ -1180,7 +1206,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. 
@@ -1196,12 +1222,12 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) for si, series := range matrixes[i] { switch { case len(series.Floats) > 0 && series.Floats[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName}) // Move input vectors forward so we don't have to re-scan the same // past points at the next step. matrixes[i][si].Floats = series.Floats[1:] case len(series.Histograms) > 0 && series.Histograms[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts}) + vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName}) matrixes[i][si].Histograms = series.Histograms[1:] default: continue @@ -1240,15 +1266,15 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) // If this could be an instant query, shortcut so as not to change sort order. if ev.endTimestamp == ev.startTimestamp { - if result.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && result.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } mat := make(Matrix, len(result)) for i, s := range result { if s.H == nil { - mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}} + mat[i] = Series{Metric: s.Metric, Floats: []FPoint{{T: ts, F: s.F}}, DropName: s.DropName} } else { - mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}} + mat[i] = Series{Metric: s.Metric, Histograms: []HPoint{{T: ts, H: s.H}}, DropName: s.DropName} } } ev.currentSamples = originalNumSamples + mat.TotalSamples() @@ -1266,7 +1292,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) } ss.ts = ts } else { - ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + ss = seriesAndTimestamp{Series{Metric: sample.Metric, DropName: sample.DropName}, ts} } addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) seriess[h] = ss @@ -1290,7 +1316,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper) return mat, warnings } -func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { +func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) { // Keep a copy of the original point slice so that it can be returned to the pool. origMatrix := slices.Clone(inputMatrix) defer func() { @@ -1302,7 +1328,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping var warnings annotations.Annotations - enh := &EvalNodeHelper{} + enh := &EvalNodeHelper{enableDelayedNameRemoval: ev.enableDelayedNameRemoval} tempNumSamples := ev.currentSamples // Create a mapping from input series to output groups. @@ -1370,7 +1396,7 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping } for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } // Reset number of samples in memory after each timestamp. 
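The DropName flag threaded through the hunks above is bookkeeping only: samples and series are merely marked during evaluation, and __name__ is actually stripped once, at the end, by the cleanupMetricLabels pass added later in this file. A toy sketch of that deferral, using a hypothetical miniature sample type rather than the engine's own:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

// sample is a stand-in for the engine's Sample with its new DropName field.
type sample struct {
	metric   labels.Labels
	dropName bool
}

// cleanup mirrors the final pass: drop __name__ only where marked.
func cleanup(samples []sample) {
	for i := range samples {
		if samples[i].dropName {
			samples[i].metric = samples[i].metric.DropMetricName()
		}
	}
}

func main() {
	s := []sample{{
		metric:   labels.FromStrings("__name__", "http_requests_total", "job", "api"),
		dropName: true, // marked during evaluation, e.g. by a rate() call
	}}
	cleanup(s)
	fmt.Println(s[0].metric) // {job="api"}
}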
@@ -1421,11 +1447,11 @@ func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { +func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { samplesStats := ev.samplesStats // Avoid double counting samples when running a subquery, those samples will be counted in later stage. ev.samplesStats = ev.samplesStats.NewChild() - val, ws := ev.eval(subq) + val, ws := ev.eval(ctx, subq) // But do incorporate the peak from the subquery samplesStats.UpdatePeakFromSubquery(ev.samplesStats) ev.samplesStats = samplesStats @@ -1452,18 +1478,20 @@ func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSele } // eval evaluates the given expression as the given AST expression node requires. -func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotations) { +func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, annotations.Annotations) { // This is the top-level evaluation method. // Thus, we check for timeout/cancellation here. - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 // Create a new span to help investigate inner evaluation performances. - ctxWithSpan, span := otel.Tracer("").Start(ev.ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) - ev.ctx = ctxWithSpan + ctx, span := otel.Tracer("").Start(ctx, stats.InnerEvalTime.SpanOperation()+" eval "+reflect.TypeOf(expr).String()) defer span.End() + if ss, ok := expr.(interface{ ShortString() string }); ok { + span.SetAttributes(attribute.String("operation", ss.ShortString())) + } switch e := expr.(type) { case *parser.AggregateExpr: @@ -1484,7 +1512,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sortedGrouping = append(sortedGrouping, valueLabel.Val) slices.Sort(sortedGrouping) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh) }, e.Expr) } @@ -1494,16 +1522,16 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // param is the number k for topk/bottomk, or q for quantile. var fParam float64 if param != nil { - val, ws := ev.eval(param) + val, ws := ev.eval(ctx, param) warnings.Merge(ws) fParam = val.(Matrix)[0].Floats[0].F } // Now fetch the data to be aggregated. 
- val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) warnings.Merge(ws) inputMatrix := val.(Matrix) - result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam) + result, ws := ev.rangeEvalAgg(ctx, e, sortedGrouping, inputMatrix, fParam) warnings.Merge(ws) ev.currentSamples = originalNumSamples + result.TotalSamples() ev.samplesStats.UpdatePeak(ev.currentSamples) @@ -1521,7 +1549,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio unwrapParenExpr(&arg) vs, ok := arg.(*parser.VectorSelector) if ok { - return ev.rangeEvalTimestampFunctionOverVectorSelector(vs, call, e) + return ev.rangeEvalTimestampFunctionOverVectorSelector(ctx, vs, call, e) } } @@ -1545,7 +1573,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio matrixArgIndex = i matrixArg = true // Replacing parser.SubqueryExpr with parser.MatrixSelector. - val, totalSamples, ws := ev.evalSubquery(subq) + val, totalSamples, ws := ev.evalSubquery(ctx, subq) e.Args[i] = val warnings.Merge(ws) defer func() { @@ -1560,14 +1588,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio // Special handling for functions that work on series not samples. switch e.Func.Name { case "label_replace": - return ev.evalLabelReplace(e.Args) + return ev.evalLabelReplace(ctx, e.Args) case "label_join": - return ev.evalLabelJoin(e.Args) + return ev.evalLabelJoin(ctx, e.Args) } if !matrixArg { // Does not have a matrix argument. - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, annos := call(v, e.Args, enh) return vec, warnings.Merge(annos) }, e.Args...) @@ -1579,7 +1607,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio otherInArgs := make([]Vector, len(e.Args)) for i, e := range e.Args { if i != matrixArgIndex { - val, ws := ev.eval(e) + val, ws := ev.eval(ctx, e) otherArgs[i] = val.(Matrix) otherInArgs[i] = Vector{Sample{}} inArgs[i] = otherInArgs[i] @@ -1593,7 +1621,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio sel := arg.(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) - ws, err := checkAndExpandSeriesSet(ev.ctx, sel) + ws, err := checkAndExpandSeriesSet(ctx, sel) warnings.Merge(ws) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), warnings}) @@ -1611,12 +1639,19 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio var prevSS *Series inMatrix := make(Matrix, 1) inArgs[matrixArgIndex] = inMatrix - enh := &EvalNodeHelper{Out: make(Vector, 0, 1)} + enh := &EvalNodeHelper{Out: make(Vector, 0, 1), enableDelayedNameRemoval: ev.enableDelayedNameRemoval} // Process all the calls for one time series at a time. it := storage.NewBuffer(selRange) var chkIter chunkenc.Iterator + + // The last_over_time function acts like offset; thus, it + // should keep the metric name. For all the other range + // vector functions, the only change needed is to drop the + // metric name in the output. 
+ dropName := e.Func.Name != "last_over_time" + for i, s := range selVS.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } ev.currentSamples -= len(floats) + totalHPointSize(histograms) @@ -1629,15 +1664,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio chkIter = s.Iterator(chkIter) it.Reset(chkIter) metric := selVS.Series[i].Labels() - // The last_over_time function acts like offset; thus, it - // should keep the metric name. For all the other range - // vector functions, the only change needed is to drop the - // metric name in the output. - if e.Func.Name != "last_over_time" { + if !ev.enableDelayedNameRemoval && dropName { metric = metric.DropMetricName() } ss := Series{ - Metric: metric, + Metric: metric, + DropName: dropName, } inMatrix[0].Metric = selVS.Series[i].Labels() for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -1752,32 +1784,35 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio return Matrix{ Series{ - Metric: createLabelsForAbsentFunction(e.Args[0]), - Floats: newp, + Metric: createLabelsForAbsentFunction(e.Args[0]), + Floats: newp, + DropName: dropName, }, }, warnings } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } - return mat, warnings case *parser.ParenExpr: - return ev.eval(e.Expr) + return ev.eval(ctx, e.Expr) case *parser.UnaryExpr: - val, ws := ev.eval(e.Expr) + val, ws := ev.eval(ctx, e.Expr) mat := val.(Matrix) if e.Op == parser.SUB { for i := range mat { - mat[i].Metric = mat[i].Metric.DropMetricName() + if !ev.enableDelayedNameRemoval { + mat[i].Metric = mat[i].Metric.DropMetricName() + } + mat[i].DropName = true for j := range mat[i].Floats { mat[i].Floats[j].F = -mat[i].Floats[j].F } } - if mat.ContainsSameLabelset() { + if !ev.enableDelayedNameRemoval && mat.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } } @@ -1786,7 +1821,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.BinaryExpr: switch lt, rt := e.LHS.Type(), e.RHS.Type(); { case lt == parser.ValueTypeScalar && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { val := scalarBinop(e.Op, v[0].(Vector)[0].F, v[1].(Vector)[0].F) return append(enh.Out, Sample{F: val}), nil }, e.LHS, e.RHS) @@ -1799,47 +1834,49 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio } switch e.Op { case parser.LAND: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorAnd(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LOR: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v 
[]parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorOr(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) case parser.LUNLESS: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return ev.VectorUnless(v[0].(Vector), v[1].(Vector), e.VectorMatching, sh[0], sh[1], enh), nil }, e.LHS, e.RHS) default: - return ev.rangeEval(initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case *parser.NumberLiteral: - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + span.SetAttributes(attribute.Float64("value", e.Val)) + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, Sample{F: e.Val, Metric: labels.EmptyLabels()}), nil }) case *parser.StringLiteral: + span.SetAttributes(attribute.String("value", e.Val)) return String{V: e.Val, T: ev.startTimestamp}, nil case *parser.VectorSelector: - ws, err := checkAndExpandSeriesSet(ev.ctx, e) + ws, err := checkAndExpandSeriesSet(ctx, e) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -1848,7 +1885,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) var chkIter chunkenc.Iterator for i, s := range e.Series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) @@ -1899,20 +1936,20 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio if ev.startTimestamp != ev.endTimestamp { panic(errors.New("cannot do range evaluation of matrix selector")) } - return 
ev.matrixSelector(e) + return ev.matrixSelector(ctx, e) case *parser.SubqueryExpr: offsetMillis := durationMilliseconds(e.Offset) rangeMillis := durationMilliseconds(e.Range) newEv := &evaluator{ endTimestamp: ev.endTimestamp - offsetMillis, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } if e.Step != 0 { @@ -1935,7 +1972,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio setOffsetForAtModifier(newEv.startTimestamp, e.Expr) } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) ev.samplesStats.IncrementSamplesAtTimestamp(ev.endTimestamp, newEv.samplesStats.TotalSamples) @@ -1943,22 +1980,22 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio case *parser.StepInvariantExpr: switch ce := e.Expr.(type) { case *parser.StringLiteral, *parser.NumberLiteral: - return ev.eval(ce) + return ev.eval(ctx, ce) } newEv := &evaluator{ startTimestamp: ev.startTimestamp, endTimestamp: ev.startTimestamp, // Always a single evaluation. interval: ev.interval, - ctx: ev.ctx, currentSamples: ev.currentSamples, maxSamples: ev.maxSamples, logger: ev.logger, lookbackDelta: ev.lookbackDelta, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + enableDelayedNameRemoval: ev.enableDelayedNameRemoval, } - res, ws := newEv.eval(e.Expr) + res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples ev.samplesStats.UpdatePeakFromSubquery(newEv.samplesStats) for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { @@ -2034,8 +2071,8 @@ func reuseOrGetFPointSlices(prevSS *Series, numSteps int) (r []FPoint) { return getFPointSlice(numSteps) } -func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { - ws, err := checkAndExpandSeriesSet(ev.ctx, vs) +func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Context, vs *parser.VectorSelector, call FunctionCall, e *parser.Call) (parser.Value, annotations.Annotations) { + ws, err := checkAndExpandSeriesSet(ctx, vs) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2046,7 +2083,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) } - return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { + return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if vs.Timestamp != nil { // This is a special case for "timestamp()" when the @ modifier is used, to ensure that // we return a point for each time step in this case. @@ -2182,7 +2219,7 @@ func putMatrixSelectorHPointSlice(p []HPoint) { } // matrixSelector evaluates a *parser.MatrixSelector expression. 
-func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annotations.Annotations) { +func (ev *evaluator) matrixSelector(ctx context.Context, node *parser.MatrixSelector) (Matrix, annotations.Annotations) { var ( vs = node.VectorSelector.(*parser.VectorSelector) @@ -2193,7 +2230,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota it = storage.NewBuffer(durationMilliseconds(node.Range)) ) - ws, err := checkAndExpandSeriesSet(ev.ctx, node) + ws, err := checkAndExpandSeriesSet(ctx, node) if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } @@ -2201,7 +2238,7 @@ func (ev *evaluator) matrixSelector(node *parser.MatrixSelector) (Matrix, annota var chkIter chunkenc.Iterator series := vs.Series for i, s := range series { - if err := contextDone(ev.ctx, "expression evaluation"); err != nil { + if err := contextDone(ctx, "expression evaluation"); err != nil { ev.error(err) } chkIter = s.Iterator(chkIter) @@ -2356,6 +2393,11 @@ loop: } else { histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Make sure to pass non-nil H to AtFloatHistogram so that it does a deep-copy. + // Not an issue in the loop above since that uses an intermediate buffer. + histograms[n].H = &histogram.FloatHistogram{} + } histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) if value.IsStaleNaN(histograms[n].H.Sum) { histograms = histograms[:n] @@ -2548,7 +2590,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * continue } metric := resultMetric(ls.Metric, rs.Metric, op, matching, enh) - if returnBool { + if !ev.enableDelayedNameRemoval && returnBool { metric = metric.DropMetricName() } insertedSigs, exists := matchedSigs[sig] @@ -2573,9 +2615,10 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * } enh.Out = append(enh.Out, Sample{ - Metric: metric, - F: floatValue, - H: histogramValue, + Metric: metric, + F: floatValue, + H: histogramValue, + DropName: returnBool, }) } return enh.Out, lastErr @@ -2675,7 +2718,10 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lhsSample.F = float lhsSample.H = histogram if shouldDropMetricName(op) || returnBool { - lhsSample.Metric = lhsSample.Metric.DropMetricName() + if !ev.enableDelayedNameRemoval { + lhsSample.Metric = lhsSample.Metric.DropMetricName() + } + lhsSample.DropName = true } enh.Out = append(enh.Out, lhsSample) } @@ -2773,15 +2819,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } type groupedAggregation struct { - seen bool // Was this output groups seen in the input at this timestamp. - hasFloat bool // Has at least 1 float64 sample aggregated. - hasHistogram bool // Has at least 1 histogram sample aggregated. - floatValue float64 - histogramValue *histogram.FloatHistogram - floatMean float64 // Mean, or "compensating value" for Kahan summation. - groupCount int - groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group - heap vectorByValueHeap + floatValue float64 + histogramValue *histogram.FloatHistogram + floatMean float64 + floatKahanC float64 // "Compensating value" for Kahan summation. + groupCount float64 + heap vectorByValueHeap + + // All bools together for better packing within the struct. + seen bool // Was this output group seen in the input at this timestamp.
+ hasFloat bool // Has at least 1 float64 sample aggregated. + hasHistogram bool // Has at least 1 histogram sample aggregated. + incompatibleHistograms bool // If true, group has seen mixed exponential and custom buckets, or incompatible custom buckets. + groupAggrComplete bool // Used by LIMITK to short-cut series loop when we've reached K elem on every group. + incrementalMean bool // True after reverting to incremental calculation of the mean value. } // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix. @@ -2805,15 +2856,14 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // Initialize this group if it's the first time we've seen it. if !group.seen { *group = groupedAggregation{ - seen: true, - floatValue: f, - groupCount: 1, + seen: true, + floatValue: f, + floatMean: f, + incompatibleHistograms: false, + groupCount: 1, } switch op { - case parser.AVG: - group.floatMean = f - fallthrough - case parser.SUM: + case parser.AVG, parser.SUM: if h == nil { group.hasFloat = true } else { @@ -2821,7 +2871,6 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatMean = f group.floatValue = 0 case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) @@ -2832,6 +2881,10 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix continue } + if group.incompatibleHistograms { + continue + } + switch op { case parser.SUM: if h != nil { @@ -2840,6 +2893,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix _, err := group.histogramValue.Add(h) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true } } // Otherwise the aggregation contained floats @@ -2847,7 +2901,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. } else { group.hasFloat = true - group.floatValue, group.floatMean = kahanSumInc(f, group.floatValue, group.floatMean) + group.floatValue, group.floatKahanC = kahanSumInc(f, group.floatValue, group.floatKahanC) } case parser.AVG: @@ -2855,15 +2909,19 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h != nil { group.hasHistogram = true if group.histogramValue != nil { - left := h.Copy().Div(float64(group.groupCount)) - right := group.histogramValue.Copy().Div(float64(group.groupCount)) + left := h.Copy().Div(group.groupCount) + right := group.histogramValue.Copy().Div(group.groupCount) toAdd, err := left.Sub(right) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } _, err = group.histogramValue.Add(toAdd) if err != nil { handleAggregationError(err, e, inputMatrix[si].Metric.Get(model.MetricNameLabel), &annos) + group.incompatibleHistograms = true + continue } } // Otherwise the aggregation contained floats @@ -2871,6 +2929,22 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix // point in copying the histogram in that case. 
} else { group.hasFloat = true + if !group.incrementalMean { + newV, newC := kahanSumInc(f, group.floatValue, group.floatKahanC) + if !math.IsInf(newV, 0) { + // The sum doesn't overflow, so we propagate it to the + // group struct and continue with the regular + // calculation of the mean value. + group.floatValue, group.floatKahanC = newV, newC + break + } + // If we are here, we know that the sum _would_ overflow. So + // instead of continuing to sum up, we revert to incremental + // calculation of the mean value from here on. + group.incrementalMean = true + group.floatMean = group.floatValue / (group.groupCount - 1) + group.floatKahanC /= group.groupCount - 1 + } if math.IsInf(group.floatMean, 0) { if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) { // The `floatMean` and `s.F` values are `Inf` of the same sign. They @@ -2888,8 +2962,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix break } } - // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. - group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount) + currentMean := group.floatMean + group.floatKahanC + group.floatMean, group.floatKahanC = kahanSumInc( + // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. + f/group.groupCount-currentMean/group.groupCount, + group.floatMean, + group.floatKahanC, + ) } case parser.GROUP: @@ -2912,7 +2991,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix if h == nil { // Ignore native histograms. group.groupCount++ delta := f - group.floatMean - group.floatMean += delta / float64(group.groupCount) + group.floatMean += delta / group.groupCount group.floatValue += delta * (f - group.floatMean) } @@ -2938,20 +3017,25 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: aggr.histogramValue = aggr.histogramValue.Compact(0) - } else { - aggr.floatValue = aggr.floatMean + case aggr.incrementalMean: + aggr.floatValue = aggr.floatMean + aggr.floatKahanC + default: + aggr.floatValue = (aggr.floatValue + aggr.floatKahanC) / aggr.groupCount } case parser.COUNT: - aggr.floatValue = float64(aggr.groupCount) + aggr.floatValue = aggr.groupCount case parser.STDVAR: - aggr.floatValue /= float64(aggr.groupCount) + aggr.floatValue /= aggr.groupCount case parser.STDDEV: - aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) + aggr.floatValue = math.Sqrt(aggr.floatValue / aggr.groupCount) case parser.QUANTILE: aggr.floatValue = quantile(q, aggr.heap) @@ -2962,10 +3046,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(e.Expr.PositionRange())) continue } - if aggr.hasHistogram { + switch { + case aggr.incompatibleHistograms: + continue + case aggr.hasHistogram: aggr.histogramValue.Compact(0) - } else { - aggr.floatValue += aggr.floatMean // Add Kahan summation compensating term. + default: + aggr.floatValue += aggr.floatKahanC } default: // For other aggregations, we already have the right value.
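The avg() rework above is easiest to see in isolation: sum with Kahan compensation while the total stays finite, then fall back to an incremental mean the moment the sum would overflow. A self-contained sketch of that strategy; kahanSumInc here mirrors the engine's helper of the same name and signature, and the mean loop is illustrative rather than the engine code:

package main

import (
	"fmt"
	"math"
)

// kahanSumInc folds inc into a compensated running sum (sum plus the
// compensating term c).
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
	t := sum + inc
	switch {
	case math.IsInf(t, 0):
		c = 0
	// Neumaier improvement: swap if the next term is larger than the sum.
	case math.Abs(sum) >= math.Abs(inc):
		c += (sum - t) + inc
	default:
		c += (inc - t) + sum
	}
	return t, c
}

func mean(vals []float64) float64 {
	var sum, c, m float64
	incremental := false
	for i, v := range vals {
		count := float64(i + 1)
		if !incremental {
			newSum, newC := kahanSumInc(v, sum, c)
			if !math.IsInf(newSum, 0) {
				// The sum stays finite: keep summing.
				sum, c = newSum, newC
				continue
			}
			// The sum would overflow: seed the incremental mean from
			// what has been aggregated so far and switch strategies.
			incremental = true
			m = sum / (count - 1)
			c /= count - 1
		}
		cur := m + c
		// Divide both sides of the `-` by count to avoid overflow.
		m, c = kahanSumInc(v/count-cur/count, m, c)
	}
	if incremental {
		return m + c
	}
	return (sum + c) / float64(len(vals))
}

func main() {
	big := math.MaxFloat64
	fmt.Println(mean([]float64{big, big, big})) // prints MaxFloat64, not +Inf
}

Run on three MaxFloat64 values, plain summation would hit +Inf after the second term; the fallback keeps the mean exact.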
@@ -2973,6 +3060,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix ss := &outputMatrix[ri] addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps) + ss.DropName = inputMatrix[ri].DropName } return annos @@ -2999,7 +3087,7 @@ seriesLoop: if !ok { continue } - s = Sample{Metric: inputMatrix[si].Metric, F: f} + s = Sample{Metric: inputMatrix[si].Metric, F: f, DropName: inputMatrix[si].DropName} group := &groups[seriesToResult[si]] // Initialize this group if it's the first time we've seen it. @@ -3083,16 +3171,16 @@ seriesLoop: mat = make(Matrix, 0, len(groups)) } - add := func(lbls labels.Labels, f float64) { + add := func(lbls labels.Labels, f float64, dropName bool) { // If this could be an instant query, add directly to the matrix so the result is in consistent order. if ev.endTimestamp == ev.startTimestamp { - mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}}) + mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}, DropName: dropName}) } else { // Otherwise the results are added into seriess elements. hash := lbls.Hash() ss, ok := seriess[hash] if !ok { - ss = Series{Metric: lbls} + ss = Series{Metric: lbls, DropName: dropName} } addToSeries(&ss, enh.Ts, f, nil, numSteps) seriess[hash] = ss @@ -3109,7 +3197,7 @@ seriesLoop: sort.Sort(sort.Reverse(aggr.heap)) } for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.DropName) } case parser.BOTTOMK: @@ -3118,12 +3206,12 @@ seriesLoop: sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap))) } for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.DropName) } case parser.LIMITK, parser.LIMIT_RATIO: for _, v := range aggr.heap { - add(v.Metric, v.F) + add(v.Metric, v.F, v.DropName) } } } @@ -3131,7 +3219,7 @@ seriesLoop: return mat, annos } -// aggregationK evaluates count_values on vec. +// aggregationCountValues evaluates count_values on vec. // Outputs as many series per group as there are values in the input. func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) { type groupCount struct { @@ -3175,6 +3263,30 @@ func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping [] return enh.Out, nil } +func (ev *evaluator) cleanupMetricLabels(v parser.Value) { + if v.Type() == parser.ValueTypeMatrix { + mat := v.(Matrix) + for i := range mat { + if mat[i].DropName { + mat[i].Metric = mat[i].Metric.DropMetricName() + } + } + if mat.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } else if v.Type() == parser.ValueTypeVector { + vec := v.(Vector) + for i := range vec { + if vec[i].DropName { + vec[i].Metric = vec[i].Metric.DropMetricName() + } + } + if vec.ContainsSameLabelset() { + ev.errorf("vector cannot contain metrics with the same labelset") + } + } +} + func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) { if h == nil { if ss.Floats == nil { @@ -3485,14 +3597,14 @@ func makeInt64Pointer(val int64) *int64 { return valp } -// Add RatioSampler interface to allow unit-testing (previously: Randomizer). +// RatioSampler allows unit-testing (previously: Randomizer). 
type RatioSampler interface { // Return this sample "offset" between [0.0, 1.0] sampleOffset(ts int64, sample *Sample) float64 AddRatioSample(r float64, sample *Sample) bool } -// Use Hash(labels.String()) / maxUint64 as a "deterministic" +// HashRatioSampler uses Hash(labels.String()) / maxUint64 as a "deterministic" // value in [0.0, 1.0]. type HashRatioSampler struct{} diff --git a/promql/engine_test.go b/promql/engine_test.go index 8e618d435..947c0e1ed 100644 --- a/promql/engine_test.go +++ b/promql/engine_test.go @@ -17,8 +17,6 @@ import ( "context" "errors" "fmt" - "math" - "os" "sort" "strconv" "sync" @@ -56,14 +54,7 @@ func TestMain(m *testing.M) { func TestQueryConcurrency(t *testing.T) { maxConcurrency := 10 - dir, err := os.MkdirTemp("", "test_concurrency") - require.NoError(t, err) - defer os.RemoveAll(dir) - queryTracker := promql.NewActiveQueryTracker(dir, maxConcurrency, nil) - t.Cleanup(func() { - require.NoError(t, queryTracker.Close()) - }) - + queryTracker := promql.NewActiveQueryTracker(t.TempDir(), maxConcurrency, nil) opts := promql.EngineOpts{ Logger: nil, Reg: nil, @@ -71,15 +62,17 @@ func TestQueryConcurrency(t *testing.T) { Timeout: 100 * time.Second, ActiveQueryTracker: queryTracker, } + engine := promqltest.NewTestEngineWithOpts(t, opts) - engine := promql.NewEngine(opts) ctx, cancelCtx := context.WithCancel(context.Background()) - defer cancelCtx() + t.Cleanup(cancelCtx) block := make(chan struct{}) processing := make(chan struct{}) done := make(chan int) - defer close(done) + t.Cleanup(func() { + close(done) + }) f := func(context.Context) error { select { @@ -164,7 +157,7 @@ func TestQueryTimeout(t *testing.T) { MaxSamples: 10, Timeout: 5 * time.Millisecond, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -189,7 +182,7 @@ func TestQueryCancel(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -263,7 +256,7 @@ func TestQueryError(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) errStorage := promql.ErrStorage{errors.New("storage error")} queryable := storage.QueryableFunc(func(mint, maxt int64) (storage.Querier, error) { return &errQuerier{err: errStorage}, nil @@ -597,7 +590,7 @@ func TestSelectHintsSetCorrectly(t *testing.T) { }, } { t.Run(tc.query, func(t *testing.T) { - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) hintsRecorder := &noopHintRecordingQueryable{} var ( @@ -628,7 +621,7 @@ func TestEngineShutdown(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) block := make(chan struct{}) @@ -764,7 +757,7 @@ load 10s t.Run(fmt.Sprintf("%d query=%s", i, c.Query), func(t *testing.T) { var err error var qry promql.Query - engine := newTestEngine() + engine := newTestEngine(t) if c.Interval == 0 { qry, err = engine.NewInstantQuery(context.Background(), storage, nil, c.Query, c.Start) } else { @@ -1306,7 +1299,7 @@ load 10s for _, c := range cases { t.Run(c.Query, func(t *testing.T) { opts := promql.NewPrometheusQueryOpts(true, 0) - engine := 
promqltest.NewTestEngine(true, 0, promqltest.DefaultMaxSamplesPerQuery) + engine := promqltest.NewTestEngine(t, true, 0, promqltest.DefaultMaxSamplesPerQuery) runQuery := func(expErr error) *stats.Statistics { var err error @@ -1333,7 +1326,7 @@ load 10s if c.SkipMaxCheck { return } - engine = promqltest.NewTestEngine(true, 0, stats.Samples.PeakSamples-1) + engine = promqltest.NewTestEngine(t, true, 0, stats.Samples.PeakSamples-1) runQuery(promql.ErrTooManySamples(env)) }) } @@ -1486,7 +1479,7 @@ load 10s for _, c := range cases { t.Run(c.Query, func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) testFunc := func(expError error) { var err error var qry promql.Query @@ -1507,18 +1500,18 @@ load 10s } // Within limit. - engine = promqltest.NewTestEngine(false, 0, c.MaxSamples) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples) testFunc(nil) // Exceeding limit. - engine = promqltest.NewTestEngine(false, 0, c.MaxSamples-1) + engine = promqltest.NewTestEngine(t, false, 0, c.MaxSamples-1) testFunc(promql.ErrTooManySamples(env)) }) } } func TestAtModifier(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := promqltest.LoadedStorage(t, ` load 10s metric{job="1"} 0+1x1000 @@ -1714,7 +1707,8 @@ load 1ms {F: 3600, T: 6 * 60 * 1000}, {F: 3600, T: 7 * 60 * 1000}, }, - Metric: labels.EmptyLabels(), + Metric: labels.EmptyLabels(), + DropName: true, }, }, }, @@ -1930,20 +1924,24 @@ func TestSubquerySelector(t *testing.T) { nil, promql.Matrix{ promql.Series{ - Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + Floats: []promql.FPoint{{F: 3, T: 7985000}, {F: 3, T: 7990000}, {F: 3, T: 7995000}, {F: 3, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "canary"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + Floats: []promql.FPoint{{F: 4, T: 7985000}, {F: 4, T: 7990000}, {F: 4, T: 7995000}, {F: 4, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "canary"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + Floats: []promql.FPoint{{F: 1, T: 7985000}, {F: 1, T: 7990000}, {F: 1, T: 7995000}, {F: 1, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "0", "group", "production"), + DropName: true, }, promql.Series{ - Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, - Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + Floats: []promql.FPoint{{F: 2, T: 7985000}, {F: 2, T: 7990000}, {F: 2, T: 7995000}, {F: 2, T: 8000000}}, + Metric: labels.FromStrings("job", "api-server", "instance", "1", "group", "production"), + DropName: true, }, }, nil, @@ -1996,7 +1994,7 @@ func TestSubquerySelector(t *testing.T) { }, } { t.Run("", func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := promqltest.LoadedStorage(t, tst.loadString) t.Cleanup(func() { storage.Close() }) @@ -2045,7 +2043,7 @@ func TestQueryLogger_basic(t *testing.T) 
{ MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) queryExec := func() { ctx, cancelCtx := context.WithCancel(context.Background()) @@ -2096,7 +2094,7 @@ func TestQueryLogger_fields(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) @@ -2125,7 +2123,7 @@ func TestQueryLogger_error(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) f1 := NewFakeQueryLogger() engine.SetQueryLogger(f1) @@ -3008,7 +3006,7 @@ func TestEngineOptsValidation(t *testing.T) { } for _, c := range cases { - eng := promql.NewEngine(c.opts) + eng := promqltest.NewTestEngineWithOpts(t, c.opts) _, err1 := eng.NewInstantQuery(context.Background(), nil, nil, c.query, time.Unix(10, 0)) _, err2 := eng.NewRangeQuery(context.Background(), nil, nil, c.query, time.Unix(0, 0), time.Unix(10, 0), time.Second) if c.fail { @@ -3022,7 +3020,7 @@ func TestEngineOptsValidation(t *testing.T) { } func TestInstantQueryWithRangeVectorSelector(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) baseT := timestamp.Time(0) storage := promqltest.LoadedStorage(t, ` @@ -3097,217 +3095,6 @@ func TestInstantQueryWithRangeVectorSelector(t *testing.T) { } } -func TestNativeHistogram_Sum_Count_Add_AvgOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. - cases := []struct { - histograms []histogram.Histogram - expected histogram.FloatHistogram - expectedAvg histogram.FloatHistogram - }{ - { - histograms: []histogram.Histogram{ - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 25, - Sum: 1234.5, - ZeroThreshold: 0.001, - ZeroCount: 4, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 1, Length: 2}, - }, - PositiveBuckets: []int64{1, 1, -1, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 2}, - {Offset: 2, Length: 2}, - }, - NegativeBuckets: []int64{2, 2, -3, 8}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 41, - Sum: 2345.6, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 0, - Count: 41, - Sum: 1111.1, - ZeroThreshold: 0.001, - ZeroCount: 5, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 4}, - {Offset: 0, Length: 0}, - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 1, Length: 4}, - {Offset: 2, Length: 0}, - {Offset: 2, Length: 3}, - }, - NegativeBuckets: []int64{1, 3, -2, 5, -2, 0, -3}, - }, - { - CounterResetHint: histogram.GaugeType, - Schema: 1, // Everything is 0 just to make the count 4 so avg has nicer numbers. 
- }, - }, - expected: histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 0, - ZeroThreshold: 0.001, - ZeroCount: 14, - Count: 107, - Sum: 4691.2, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 7}, - }, - PositiveBuckets: []float64{3, 8, 2, 5, 3, 2, 2}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 6}, - {Offset: 3, Length: 3}, - }, - NegativeBuckets: []float64{2, 6, 8, 4, 15, 9, 10, 10, 4}, - }, - expectedAvg: histogram.FloatHistogram{ - CounterResetHint: histogram.GaugeType, - Schema: 0, - ZeroThreshold: 0.001, - ZeroCount: 3.5, - Count: 26.75, - Sum: 1172.8, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 7}, - }, - PositiveBuckets: []float64{0.75, 2, 0.5, 1.25, 0.75, 0.5, 0.5}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 6}, - {Offset: 3, Length: 3}, - }, - NegativeBuckets: []float64{0.5, 1.5, 2, 1, 3.75, 2.25, 2.5, 2.5, 1}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - seriesNameOverTime := "sparse_histogram_series_over_time" - - engine := newTestEngine() - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - _, err := app.Append(0, labels.FromStrings("__name__", "float_series", "idx", "0"), ts, 42) - require.NoError(t, err) - for idx1, h := range c.histograms { - lbls := labels.FromStrings("__name__", seriesName, "idx", strconv.Itoa(idx1)) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - - lbls = labels.FromStrings("__name__", seriesNameOverTime) - newTs := ts + int64(idx1)*int64(time.Minute/time.Millisecond) - // Since we mutate h later, we need to create a copy here. - if floatHisto { - _, err = app.AppendHistogram(0, lbls, newTs, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, newTs, h.Copy(), nil) - } - require.NoError(t, err) - } - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, ts int64, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - require.Empty(t, res.Warnings) - - vector, err := res.Vector() - require.NoError(t, err) - - testutil.RequireEqual(t, exp, vector) - } - queryAndCheckAnnotations := func(queryString string, ts int64, expWarnings annotations.Annotations) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - require.Equal(t, expWarnings, res.Warnings) - } - - // sum(). - queryString := fmt.Sprintf("sum(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - queryString = `sum({idx="0"})` - var annos annotations.Annotations - annos.Add(annotations.NewMixedFloatsHistogramsAggWarning(posrange.PositionRange{Start: 4, End: 13})) - queryAndCheckAnnotations(queryString, ts, annos) - - // + operator. 
- queryString = fmt.Sprintf(`%s{idx="0"}`, seriesName) - for idx := 1; idx < len(c.histograms); idx++ { - queryString += fmt.Sprintf(` + ignoring(idx) %s{idx="%d"}`, seriesName, idx) - } - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expected, Metric: labels.EmptyLabels()}}) - - // count(). - queryString = fmt.Sprintf("count(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, F: 4, Metric: labels.EmptyLabels()}}) - - // avg(). - queryString = fmt.Sprintf("avg(%s)", seriesName) - queryAndCheck(queryString, ts, []promql.Sample{{T: ts, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) - - offset := int64(len(c.histograms) - 1) - newTs := ts + offset*int64(time.Minute/time.Millisecond) - - // sum_over_time(). - queryString = fmt.Sprintf("sum_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expected, Metric: labels.EmptyLabels()}}) - - // avg_over_time(). - queryString = fmt.Sprintf("avg_over_time(%s[%dm:1m])", seriesNameOverTime, offset) - queryAndCheck(queryString, newTs, []promql.Sample{{T: newTs, H: &c.expectedAvg, Metric: labels.EmptyLabels()}}) - }) - idx0++ - } - } -} - func TestNativeHistogram_SubOperator(t *testing.T) { // TODO(codesome): Integrate histograms into the PromQL testing framework // and write more tests there. @@ -3487,7 +3274,7 @@ func TestNativeHistogram_SubOperator(t *testing.T) { for _, c := range cases { for _, floatHisto := range []bool{true, false} { t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - engine := newTestEngine() + engine := newTestEngine(t) storage := teststorage.New(t) t.Cleanup(func() { storage.Close() }) @@ -3543,171 +3330,6 @@ func TestNativeHistogram_SubOperator(t *testing.T) { } } -func TestNativeHistogram_MulDivOperator(t *testing.T) { - // TODO(codesome): Integrate histograms into the PromQL testing framework - // and write more tests there. 
- originalHistogram := histogram.Histogram{ - Schema: 0, - Count: 21, - Sum: 33, - ZeroThreshold: 0.001, - ZeroCount: 3, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []int64{3, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []int64{3, 0, 0}, - } - - cases := []struct { - scalar float64 - histogram histogram.Histogram - expectedMul histogram.FloatHistogram - expectedDiv histogram.FloatHistogram - }{ - { - scalar: 3, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 63, - Sum: 99, - ZeroThreshold: 0.001, - ZeroCount: 9, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{9, 9, 9}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{9, 9, 9}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: 7, - Sum: 11, - ZeroThreshold: 0.001, - ZeroCount: 1, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{1, 1, 1}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{1, 1, 1}, - }, - }, - { - scalar: 0, - histogram: originalHistogram, - expectedMul: histogram.FloatHistogram{ - Schema: 0, - Count: 0, - Sum: 0, - ZeroThreshold: 0.001, - ZeroCount: 0, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{0, 0, 0}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{0, 0, 0}, - }, - expectedDiv: histogram.FloatHistogram{ - Schema: 0, - Count: math.Inf(1), - Sum: math.Inf(1), - ZeroThreshold: 0.001, - ZeroCount: math.Inf(1), - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - PositiveBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - NegativeSpans: []histogram.Span{ - {Offset: 0, Length: 3}, - }, - NegativeBuckets: []float64{math.Inf(1), math.Inf(1), math.Inf(1)}, - }, - }, - } - - idx0 := int64(0) - for _, c := range cases { - for _, floatHisto := range []bool{true, false} { - t.Run(fmt.Sprintf("floatHistogram=%t %d", floatHisto, idx0), func(t *testing.T) { - storage := teststorage.New(t) - t.Cleanup(func() { storage.Close() }) - - seriesName := "sparse_histogram_series" - floatSeriesName := "float_series" - - engine := newTestEngine() - - ts := idx0 * int64(10*time.Minute/time.Millisecond) - app := storage.Appender(context.Background()) - h := c.histogram - lbls := labels.FromStrings("__name__", seriesName) - // Since we mutate h later, we need to create a copy here. - var err error - if floatHisto { - _, err = app.AppendHistogram(0, lbls, ts, nil, h.Copy().ToFloat(nil)) - } else { - _, err = app.AppendHistogram(0, lbls, ts, h.Copy(), nil) - } - require.NoError(t, err) - _, err = app.Append(0, labels.FromStrings("__name__", floatSeriesName), ts, c.scalar) - require.NoError(t, err) - require.NoError(t, app.Commit()) - - queryAndCheck := func(queryString string, exp promql.Vector) { - qry, err := engine.NewInstantQuery(context.Background(), storage, nil, queryString, timestamp.Time(ts)) - require.NoError(t, err) - - res := qry.Exec(context.Background()) - require.NoError(t, res.Err) - - vector, err := res.Vector() - require.NoError(t, err) - - testutil.RequireEqual(t, exp, vector) - } - - // histogram * scalar. 
- queryString := fmt.Sprintf(`%s * %f`, seriesName, c.scalar)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // scalar * histogram.
- queryString = fmt.Sprintf(`%f * %s`, c.scalar, seriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // histogram * float.
- queryString = fmt.Sprintf(`%s * %s`, seriesName, floatSeriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // float * histogram.
- queryString = fmt.Sprintf(`%s * %s`, floatSeriesName, seriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedMul, Metric: labels.EmptyLabels()}})
-
- // histogram / scalar.
- queryString = fmt.Sprintf(`%s / %f`, seriesName, c.scalar)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
-
- // histogram / float.
- queryString = fmt.Sprintf(`%s / %s`, seriesName, floatSeriesName)
- queryAndCheck(queryString, []promql.Sample{{T: ts, H: &c.expectedDiv, Metric: labels.EmptyLabels()}})
- })
- idx0++
- }
- }
-}
-
 func TestQueryLookbackDelta(t *testing.T) {
 var (
 load = `load 5m
@@ -3771,7 +3393,7 @@ metric 0 1 2
 for _, c := range cases {
 c := c
 t.Run(c.name, func(t *testing.T) {
- engine := promqltest.NewTestEngine(false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
+ engine := promqltest.NewTestEngine(t, false, c.engineLookback, promqltest.DefaultMaxSamplesPerQuery)
 storage := promqltest.LoadedStorage(t, load)
 t.Cleanup(func() { storage.Close() })
@@ -3797,3 +3419,62 @@ func makeInt64Pointer(val int64) *int64 {
 *valp = val
 return valp
 }
+
+func TestHistogramCopyFromIteratorRegression(t *testing.T) {
+ // Loading the following histograms creates two chunks because there's a
+ // counter reset. Not only is the counter lower in the last histogram,
+ // but some buckets are missing as well.
+ // This in turn means that the chunk iterators will have different spans.
+ load := `load 1m
+histogram {{sum:4 count:4 buckets:[2 2]}} {{sum:6 count:6 buckets:[3 3]}} {{sum:1 count:1 buckets:[1]}}
+`
+ storage := promqltest.LoadedStorage(t, load)
+ t.Cleanup(func() { storage.Close() })
+ engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
+
+ verify := func(t *testing.T, qry promql.Query, expected []histogram.FloatHistogram) {
+ res := qry.Exec(context.Background())
+ require.NoError(t, res.Err)
+
+ m, ok := res.Value.(promql.Matrix)
+ require.True(t, ok)
+
+ require.Len(t, m, 1)
+ series := m[0]
+
+ require.Empty(t, series.Floats)
+ require.Len(t, series.Histograms, len(expected))
+ for i, e := range expected {
+ series.Histograms[i].H.CounterResetHint = histogram.UnknownCounterReset // Don't care.
+ require.Equal(t, &e, series.Histograms[i].H)
+ }
+ }
+
+ qry, err := engine.NewRangeQuery(context.Background(), storage, nil, "increase(histogram[60s])", time.Unix(0, 0), time.Unix(0, 0).Add(1*time.Minute), time.Minute)
+ require.NoError(t, err)
+ verify(t, qry, []histogram.FloatHistogram{
+ {
+ Count: 2,
+ Sum: 2, // Increase from 4 to 6 is 2.
+ PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, // Two buckets changed between the first and second histogram.
+ PositiveBuckets: []float64{1, 1}, // Increase from 2 to 3 is 1 in both buckets.
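+ // The post-reset sample at 2m lies outside this 60s window; the counter
+ // reset itself is exercised by the instant query below.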
+ }, + }) + + qry, err = engine.NewInstantQuery(context.Background(), storage, nil, "histogram[60s]", time.Unix(0, 0).Add(2*time.Minute)) + require.NoError(t, err) + verify(t, qry, []histogram.FloatHistogram{ + { + Count: 6, + Sum: 6, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 2}}, + PositiveBuckets: []float64{3, 3}, + }, + { + Count: 1, + Sum: 1, + PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, + PositiveBuckets: []float64{1}, + }, + }) +} diff --git a/promql/functions.go b/promql/functions.go index dcc2cd759..8141d2a9e 100644 --- a/promql/functions.go +++ b/promql/functions.go @@ -14,6 +14,7 @@ package promql import ( + "context" "errors" "fmt" "math" @@ -97,9 +98,10 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod lastT = samples.Histograms[numSamplesMinusOne].T var newAnnos annotations.Annotations resultHistogram, newAnnos = histogramRate(samples.Histograms, isCounter, metricName, args[0].PositionRange()) + annos.Merge(newAnnos) if resultHistogram == nil { // The histograms are not compatible with each other. - return enh.Out, annos.Merge(newAnnos) + return enh.Out, annos } case len(samples.Floats) > 1: numSamplesMinusOne = len(samples.Floats) - 1 @@ -178,17 +180,29 @@ func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNod // Otherwise, it returns the calculated histogram and an empty annotation. func histogramRate(points []HPoint, isCounter bool, metricName string, pos posrange.PositionRange) (*histogram.FloatHistogram, annotations.Annotations) { prev := points[0].H + usingCustomBuckets := prev.UsesCustomBuckets() last := points[len(points)-1].H if last == nil { return nil, annotations.New().Add(annotations.NewMixedFloatsHistogramsWarning(metricName, pos)) } + minSchema := prev.Schema if last.Schema < minSchema { minSchema = last.Schema } + if last.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } + var annos annotations.Annotations + // We check for gauge type histograms in the loop below, but the loop below does not run on the first and last point, + // so check the first and last point now. + if isCounter && (prev.CounterResetHint == histogram.GaugeType || last.CounterResetHint == histogram.GaugeType) { + annos.Add(annotations.NewNativeHistogramNotCounterWarning(metricName, pos)) + } + // First iteration to find out two things: // - What's the smallest relevant schema? // - Are all data points histograms? @@ -208,6 +222,9 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra if curr.Schema < minSchema { minSchema = curr.Schema } + if curr.UsesCustomBuckets() != usingCustomBuckets { + return nil, annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) + } } h := last.CopyToSchema(minSchema) @@ -241,7 +258,7 @@ func histogramRate(points []HPoint, isCounter bool, metricName string, pos posra } h.CounterResetHint = histogram.GaugeType - return h.Compact(0), nil + return h.Compact(0), annos } // === delta(Matrix parser.ValueTypeMatrix) (Vector, Annotations) === @@ -390,17 +407,22 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) 
(Vector, Annotations) === func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // In case the labels are the same, NaN should sort to the bottom, so take - // ascending sort with NaN first and reverse it. - var anno annotations.Annotations - vals[0], anno = funcSort(vals, args, enh) - labels := stringSliceFromArgs(args[1:]) + // First, sort by the full label set. This ensures a consistent ordering in case sorting by the + // labels provided as arguments is not conclusive. slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label + return labels.Compare(a.Metric, b.Metric) + }) + + labels := stringSliceFromArgs(args[1:]) + // Next, sort by the labels provided as arguments. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + // Iterate over each given label. for _, label := range labels { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) + // If we encounter multiple samples with the same label values, the sorting which was + // performed in the first step will act as a "tie breaker". if lv1 == lv2 { continue } @@ -415,22 +437,27 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode return 0 }) - return vals[0].(Vector), anno + return vals[0].(Vector), nil } // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // In case the labels are the same, NaN should sort to the bottom, so take - // ascending sort with NaN first and reverse it. - var anno annotations.Annotations - vals[0], anno = funcSortDesc(vals, args, enh) - labels := stringSliceFromArgs(args[1:]) + // First, sort by the full label set. This ensures a consistent ordering in case sorting by the + // labels provided as arguments is not conclusive. slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label + return labels.Compare(b.Metric, a.Metric) + }) + + labels := stringSliceFromArgs(args[1:]) + // Next, sort by the labels provided as arguments. + slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { + // Iterate over each given label. for _, label := range labels { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) + // If we encounter multiple samples with the same label values, the sorting which was + // performed in the first step will act as a "tie breaker". 
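+ // For example, if several samples share the same values for all of the given
+ // labels, the full-label-set sort above has already fixed their relative
+ // order, so the result is deterministic.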
if lv1 == lv2 { continue } @@ -445,21 +472,25 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval return 0 }) - return vals[0].(Vector), anno + return vals[0].(Vector), nil } // === clamp(Vector parser.ValueTypeVector, min, max Scalar) (Vector, Annotations) === func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F - max := vals[2].(Vector)[0].F - if max < min { + minVal := vals[1].(Vector)[0].F + maxVal := vals[2].(Vector)[0].F + if maxVal < minVal { return enh.Out, nil } for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Max(min, math.Min(max, el.F)), + Metric: el.Metric, + F: math.Max(minVal, math.Min(maxVal, el.F)), + DropName: true, }) } return enh.Out, nil @@ -468,11 +499,15 @@ func funcClamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper // === clamp_max(Vector parser.ValueTypeVector, max Scalar) (Vector, Annotations) === func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - max := vals[1].(Vector)[0].F + maxVal := vals[1].(Vector)[0].F for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Min(max, el.F), + Metric: el.Metric, + F: math.Min(maxVal, el.F), + DropName: true, }) } return enh.Out, nil @@ -481,11 +516,15 @@ func funcClampMax(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === clamp_min(Vector parser.ValueTypeVector, min Scalar) (Vector, Annotations) === func funcClampMin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) - min := vals[1].(Vector)[0].F + minVal := vals[1].(Vector)[0].F for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: math.Max(min, el.F), + Metric: el.Metric, + F: math.Max(minVal, el.F), + DropName: true, }) } return enh.Out, nil @@ -506,8 +545,9 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper for _, el := range vec { f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f, + Metric: el.Metric, + F: f, + DropName: true, }) } return enh.Out, nil @@ -573,9 +613,28 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return vec, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - var mean, count, c float64 + var ( + sum, mean, count, kahanC float64 + incrementalMean bool + ) for _, f := range s.Floats { count++ + if !incrementalMean { + newSum, newC := kahanSumInc(f.F, sum, kahanC) + // Perform regular mean calculation as long as + // the sum doesn't overflow and (in any case) + // for the first iteration (even if we start + // with ±Inf) to not run into division-by-zero + // problems below. + if count == 1 || !math.IsInf(newSum, 0) { + sum, kahanC = newSum, newC + continue + } + // Handle overflow by reverting to incremental calculation of the mean value. 
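+ // From here on the mean is tracked directly, updated roughly as
+ // mean += x/count - mean/count (with Kahan compensation), so the
+ // running sum that just overflowed is no longer needed.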
+ incrementalMean = true + mean = sum / (count - 1) + kahanC /= count - 1 + } if math.IsInf(mean, 0) { if math.IsInf(f.F, 0) && (mean > 0) == (f.F > 0) { // The `mean` and `f.F` values are `Inf` of the same sign. They @@ -593,13 +652,13 @@ func funcAvgOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode continue } } - mean, c = kahanSumInc(f.F/count-mean/count, mean, c) + correctedMean := mean + kahanC + mean, kahanC = kahanSumInc(f.F/count-correctedMean/count, mean, kahanC) } - - if math.IsInf(mean, 0) { - return mean + if incrementalMean { + return mean + kahanC } - return mean + c + return (sum + kahanC) / count }), nil } @@ -665,13 +724,13 @@ func funcMaxOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - max := s.Floats[0].F + maxVal := s.Floats[0].F for _, f := range s.Floats { - if f.F > max || math.IsNaN(max) { - max = f.F + if f.F > maxVal || math.IsNaN(maxVal) { + maxVal = f.F } } - return max + return maxVal }), nil } @@ -685,13 +744,13 @@ func funcMinOverTime(vals []parser.Value, args parser.Expressions, enh *EvalNode return enh.Out, nil } return aggrOverTime(vals, enh, func(s Series) float64 { - min := s.Floats[0].F + minVal := s.Floats[0].F for _, f := range s.Floats { - if f.F < min || math.IsNaN(min) { - min = f.F + if f.F < minVal || math.IsNaN(minVal) { + minVal = f.F } } - return min + return minVal }), nil } @@ -837,9 +896,13 @@ func funcPresentOverTime(vals []parser.Value, args parser.Expressions, enh *Eval func simpleFunc(vals []parser.Value, enh *EvalNodeHelper, f func(float64) float64) Vector { for _, el := range vals[0].(Vector) { if el.H == nil { // Process only float samples. + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f(el.F), + Metric: el.Metric, + F: f(el.F), + DropName: true, }) } } @@ -983,9 +1046,13 @@ func funcSgn(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { vec := vals[0].(Vector) for _, el := range vec { + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: float64(el.T) / 1000, + Metric: el.Metric, + F: float64(el.T) / 1000, + DropName: true, }) } return enh.Out, nil @@ -1092,9 +1159,13 @@ func funcHistogramCount(vals []parser.Value, args parser.Expressions, enh *EvalN if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: sample.H.Count, + Metric: sample.Metric, + F: sample.H.Count, + DropName: true, }) } return enh.Out, nil @@ -1109,9 +1180,13 @@ func funcHistogramSum(vals []parser.Value, args parser.Expressions, enh *EvalNod if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: sample.H.Sum, + Metric: sample.Metric, + F: sample.H.Sum, + DropName: true, }) } return enh.Out, nil @@ -1126,9 +1201,13 @@ func funcHistogramAvg(vals []parser.Value, args parser.Expressions, enh *EvalNod if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = 
sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: sample.H.Sum / sample.H.Count, + Metric: sample.Metric, + F: sample.H.Sum / sample.H.Count, + DropName: true, }) } return enh.Out, nil @@ -1165,9 +1244,13 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval } variance += cVariance variance /= sample.H.Count + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: math.Sqrt(variance), + Metric: sample.Metric, + F: math.Sqrt(variance), + DropName: true, }) } return enh.Out, nil @@ -1204,9 +1287,13 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval } variance += cVariance variance /= sample.H.Count + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: variance, + Metric: sample.Metric, + F: variance, + DropName: true, }) } return enh.Out, nil @@ -1223,9 +1310,13 @@ func funcHistogramFraction(vals []parser.Value, args parser.Expressions, enh *Ev if sample.H == nil { continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: histogramFraction(lower, upper, sample.H), + Metric: sample.Metric, + F: histogramFraction(lower, upper, sample.H), + DropName: true, }) } return enh.Out, nil @@ -1293,9 +1384,13 @@ func funcHistogramQuantile(vals []parser.Value, args parser.Expressions, enh *Ev continue } + if !enh.enableDelayedNameRemoval { + sample.Metric = sample.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: sample.Metric.DropMetricName(), - F: histogramQuantile(q, sample.H), + Metric: sample.Metric, + F: histogramQuantile(q, sample.H), + DropName: true, }) } @@ -1369,7 +1464,7 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp } // label_replace function operates only on series; does not look at timestamps or values. -func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, annotations.Annotations) { +func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( dst = stringFromArg(args[1]) repl = stringFromArg(args[2]) @@ -1385,7 +1480,7 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an panic(fmt.Errorf("invalid destination label name in label_replace(): %s", dst)) } - val, ws := ev.eval(args[0]) + val, ws := ev.eval(ctx, args[0]) matrix := val.(Matrix) lb := labels.NewBuilder(labels.EmptyLabels()) @@ -1397,6 +1492,11 @@ func (ev *evaluator) evalLabelReplace(args parser.Expressions) (parser.Value, an lb.Reset(el.Metric) lb.Set(dst, string(res)) matrix[i].Metric = lb.Labels() + if dst == model.MetricNameLabel { + matrix[i].DropName = false + } else { + matrix[i].DropName = el.DropName + } } } if matrix.ContainsSameLabelset() { @@ -1421,7 +1521,7 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe } // label_join function operates only on series; does not look at timestamps or values. 
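// For example, label_join(up{job="api",instance="a:1"}, "combined", "-", "job", "instance") sets combined="api-a:1" on each resulting series.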
-func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annotations.Annotations) { +func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { var ( dst = stringFromArg(args[1]) sep = stringFromArg(args[2]) @@ -1438,7 +1538,7 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot panic(fmt.Errorf("invalid destination label name in label_join(): %s", dst)) } - val, ws := ev.eval(args[0]) + val, ws := ev.eval(ctx, args[0]) matrix := val.(Matrix) srcVals := make([]string, len(srcLabels)) lb := labels.NewBuilder(labels.EmptyLabels()) @@ -1451,6 +1551,12 @@ func (ev *evaluator) evalLabelJoin(args parser.Expressions) (parser.Value, annot lb.Reset(el.Metric) lb.Set(dst, strval) matrix[i].Metric = lb.Labels() + + if dst == model.MetricNameLabel { + matrix[i].DropName = false + } else { + matrix[i].DropName = el.DropName + } } return matrix, ws @@ -1473,9 +1579,13 @@ func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) flo for _, el := range vals[0].(Vector) { t := time.Unix(int64(el.F), 0).UTC() + if !enh.enableDelayedNameRemoval { + el.Metric = el.Metric.DropMetricName() + } enh.Out = append(enh.Out, Sample{ - Metric: el.Metric.DropMetricName(), - F: f(t), + Metric: el.Metric, + F: f(t), + DropName: true, }) } return enh.Out diff --git a/promql/functions_test.go b/promql/functions_test.go index aef59c837..9ee0ba51d 100644 --- a/promql/functions_test.go +++ b/promql/functions_test.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/util/teststorage" ) @@ -39,7 +40,7 @@ func TestDeriv(t *testing.T) { MaxSamples: 10000, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) a := storage.Appender(context.Background()) diff --git a/promql/fuzz.go b/promql/fuzz.go index 3fd50b949..5f08e6a72 100644 --- a/promql/fuzz.go +++ b/promql/fuzz.go @@ -68,6 +68,10 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int { panic(warning) } + if contentType == "application/openmetrics-text" { + p = textparse.NewOpenMetricsParser(in, symbolTable) + } + var err error for { _, err = p.Next() diff --git a/promql/parser/ast.go b/promql/parser/ast.go index 830e8a2c5..162d7817a 100644 --- a/promql/parser/ast.go +++ b/promql/parser/ast.go @@ -352,8 +352,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) { // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f // for all the non-nil children of node, recursively. func Inspect(node Node, f inspector) { - //nolint: errcheck - Walk(f, node, nil) + Walk(f, node, nil) //nolint:errcheck } // Children returns a list of all child nodes of a syntax tree node. @@ -419,7 +418,7 @@ func mergeRanges(first, last Node) posrange.PositionRange { } } -// Item implements the Node interface. +// PositionRange implements the Node interface. // This makes it possible to call mergeRanges on them. 
func (i *Item) PositionRange() posrange.PositionRange { return posrange.PositionRange{ diff --git a/promql/parser/generated_parser.y b/promql/parser/generated_parser.y index b8e6aa373..da24be0c4 100644 --- a/promql/parser/generated_parser.y +++ b/promql/parser/generated_parser.y @@ -23,6 +23,8 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/promql/parser/posrange" + + "github.com/prometheus/common/model" ) %} @@ -360,11 +362,19 @@ grouping_label_list: grouping_label : maybe_label { - if !isLabel($1.Val) { + if !model.LabelName($1.Val).IsValid() { yylex.(*parser).unexpected("grouping opts", "label") } $$ = $1 } + | STRING { + if !model.LabelName(yylex.(*parser).unquoteString($1.Val)).IsValid() { + yylex.(*parser).unexpected("grouping opts", "label") + } + $$ = $1 + $$.Pos++ + $$.Val = yylex.(*parser).unquoteString($$.Val) + } | error { yylex.(*parser).unexpected("grouping opts", "label"); $$ = Item{} } ; diff --git a/promql/parser/generated_parser.y.go b/promql/parser/generated_parser.y.go index 62786052e..22231f73e 100644 --- a/promql/parser/generated_parser.y.go +++ b/promql/parser/generated_parser.y.go @@ -12,6 +12,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser/posrange" + + "github.com/prometheus/common/model" ) type yySymType struct { @@ -249,290 +251,293 @@ var yyExca = [...]int16{ 1, -1, -2, 0, -1, 37, - 1, 137, - 10, 137, - 24, 137, + 1, 138, + 10, 138, + 24, 138, -2, 0, -1, 61, - 2, 180, - 15, 180, - 79, 180, - 85, 180, - -2, 101, - -1, 62, 2, 181, 15, 181, 79, 181, 85, 181, -2, 102, - -1, 63, + -1, 62, 2, 182, 15, 182, 79, 182, 85, 182, - -2, 104, - -1, 64, + -2, 103, + -1, 63, 2, 183, 15, 183, 79, 183, 85, 183, -2, 105, - -1, 65, + -1, 64, 2, 184, 15, 184, 79, 184, 85, 184, -2, 106, - -1, 66, + -1, 65, 2, 185, 15, 185, 79, 185, 85, 185, - -2, 111, - -1, 67, + -2, 107, + -1, 66, 2, 186, 15, 186, 79, 186, 85, 186, - -2, 113, - -1, 68, + -2, 112, + -1, 67, 2, 187, 15, 187, 79, 187, 85, 187, - -2, 115, - -1, 69, + -2, 114, + -1, 68, 2, 188, 15, 188, 79, 188, 85, 188, -2, 116, - -1, 70, + -1, 69, 2, 189, 15, 189, 79, 189, 85, 189, -2, 117, - -1, 71, + -1, 70, 2, 190, 15, 190, 79, 190, 85, 190, -2, 118, - -1, 72, + -1, 71, 2, 191, 15, 191, 79, 191, 85, 191, -2, 119, - -1, 73, + -1, 72, 2, 192, 15, 192, 79, 192, 85, 192, - -2, 123, - -1, 74, + -2, 120, + -1, 73, 2, 193, 15, 193, 79, 193, 85, 193, -2, 124, + -1, 74, + 2, 194, + 15, 194, + 79, 194, + 85, 194, + -2, 125, -1, 200, - 9, 242, - 12, 242, - 13, 242, - 18, 242, - 19, 242, - 25, 242, - 41, 242, - 47, 242, - 48, 242, - 51, 242, - 57, 242, - 62, 242, - 63, 242, - 64, 242, - 65, 242, - 66, 242, - 67, 242, - 68, 242, - 69, 242, - 70, 242, - 71, 242, - 72, 242, - 73, 242, - 74, 242, - 75, 242, - 79, 242, - 83, 242, - 85, 242, - 88, 242, - 89, 242, + 9, 243, + 12, 243, + 13, 243, + 18, 243, + 19, 243, + 25, 243, + 41, 243, + 47, 243, + 48, 243, + 51, 243, + 57, 243, + 62, 243, + 63, 243, + 64, 243, + 65, 243, + 66, 243, + 67, 243, + 68, 243, + 69, 243, + 70, 243, + 71, 243, + 72, 243, + 73, 243, + 74, 243, + 75, 243, + 79, 243, + 83, 243, + 85, 243, + 88, 243, + 89, 243, -2, 0, -1, 201, - 9, 242, - 12, 242, - 13, 242, - 18, 242, - 19, 242, - 25, 242, - 41, 242, - 47, 242, - 48, 242, - 51, 242, - 57, 242, - 62, 242, - 63, 242, - 64, 242, - 65, 242, - 66, 242, - 67, 242, - 68, 242, - 69, 242, - 70, 242, - 71, 242, - 72, 242, - 73, 242, 
- 74, 242, - 75, 242, - 79, 242, - 83, 242, - 85, 242, - 88, 242, - 89, 242, + 9, 243, + 12, 243, + 13, 243, + 18, 243, + 19, 243, + 25, 243, + 41, 243, + 47, 243, + 48, 243, + 51, 243, + 57, 243, + 62, 243, + 63, 243, + 64, 243, + 65, 243, + 66, 243, + 67, 243, + 68, 243, + 69, 243, + 70, 243, + 71, 243, + 72, 243, + 73, 243, + 74, 243, + 75, 243, + 79, 243, + 83, 243, + 85, 243, + 88, 243, + 89, 243, -2, 0, } const yyPrivate = 57344 -const yyLast = 763 +const yyLast = 799 var yyAct = [...]int16{ - 155, 333, 331, 275, 338, 152, 226, 39, 192, 44, - 290, 289, 156, 118, 82, 178, 106, 55, 109, 105, - 53, 77, 133, 56, 110, 108, 22, 54, 356, 6, - 172, 107, 60, 57, 345, 346, 347, 348, 111, 198, - 328, 199, 200, 201, 327, 154, 303, 355, 266, 75, - 354, 151, 160, 128, 259, 18, 19, 160, 55, 20, - 301, 101, 159, 104, 113, 76, 114, 159, 54, 258, - 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 161, 112, 269, 13, 103, 161, - 292, 24, 115, 30, 309, 265, 31, 32, 332, 267, - 162, 270, 109, 223, 323, 162, 150, 222, 110, 308, - 301, 263, 310, 149, 161, 163, 307, 271, 264, 173, - 167, 170, 221, 322, 166, 2, 3, 4, 5, 194, - 162, 157, 158, 179, 262, 180, 184, 197, 165, 186, - 196, 195, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 129, 188, 121, - 217, 121, 219, 220, 55, 38, 218, 53, 77, 119, - 56, 119, 339, 22, 54, 182, 169, 260, 298, 117, - 57, 187, 122, 297, 122, 181, 183, 160, 295, 168, - 261, 180, 111, 77, 164, 55, 75, 159, 296, 357, - 7, 55, 18, 19, 268, 54, 20, 294, 35, 287, - 288, 54, 76, 291, 321, 320, 319, 61, 62, 63, + 155, 334, 332, 276, 339, 152, 226, 39, 192, 44, + 291, 290, 156, 118, 82, 178, 229, 107, 106, 346, + 347, 348, 349, 109, 108, 198, 239, 199, 133, 110, + 105, 60, 245, 121, 6, 329, 325, 111, 328, 228, + 200, 201, 160, 119, 304, 267, 293, 128, 260, 160, + 151, 261, 159, 302, 358, 311, 122, 55, 89, 159, + 196, 241, 242, 259, 113, 243, 114, 54, 98, 99, + 302, 112, 101, 256, 104, 88, 230, 232, 234, 235, + 236, 244, 246, 249, 250, 251, 252, 253, 257, 258, + 160, 115, 231, 233, 237, 238, 240, 247, 248, 103, + 159, 109, 254, 255, 324, 150, 357, 110, 333, 218, + 111, 340, 310, 149, 77, 163, 7, 105, 35, 173, + 167, 170, 161, 323, 165, 356, 166, 309, 355, 194, + 2, 3, 4, 5, 308, 322, 184, 197, 162, 186, + 321, 195, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 229, 129, 101, + 217, 104, 219, 220, 190, 266, 270, 239, 160, 121, + 268, 193, 264, 245, 55, 196, 154, 225, 159, 119, + 228, 271, 188, 160, 54, 161, 103, 117, 265, 84, + 262, 299, 122, 159, 320, 263, 298, 272, 10, 83, + 161, 162, 241, 242, 269, 187, 243, 185, 79, 288, + 289, 297, 319, 292, 256, 161, 162, 230, 232, 234, + 235, 236, 244, 246, 249, 250, 251, 252, 253, 257, + 258, 162, 294, 231, 233, 237, 238, 240, 247, 248, + 318, 317, 316, 254, 255, 180, 315, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 157, 158, 169, 105, 314, 296, 300, 301, + 303, 223, 305, 313, 55, 222, 179, 168, 180, 84, + 306, 307, 177, 125, 54, 182, 295, 176, 124, 83, + 221, 312, 87, 89, 8, 181, 183, 81, 37, 86, + 175, 123, 36, 98, 99, 326, 327, 101, 102, 104, + 88, 127, 331, 126, 50, 336, 337, 338, 182, 335, + 78, 1, 342, 341, 344, 343, 49, 48, 181, 183, + 350, 351, 47, 55, 103, 352, 53, 77, 164, 56, + 46, 354, 22, 54, 59, 55, 172, 9, 9, 57, + 132, 45, 43, 130, 171, 54, 359, 42, 131, 41, + 40, 51, 191, 353, 273, 75, 85, 189, 224, 80, + 345, 18, 19, 120, 153, 20, 58, 227, 52, 116, + 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 0, 
0, 0, 13, 0, 0, 0, 24, 0, 30, + 0, 0, 31, 32, 55, 38, 0, 53, 77, 0, + 56, 275, 0, 22, 54, 0, 0, 0, 274, 0, + 57, 0, 278, 279, 277, 284, 286, 283, 285, 280, + 281, 282, 287, 0, 0, 0, 75, 0, 0, 0, + 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, + 0, 0, 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, 182, 293, 318, 13, 160, 317, 316, 24, 315, - 30, 181, 183, 31, 32, 159, 134, 135, 136, 137, - 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, - 148, 314, 313, 55, 105, 84, 84, 299, 300, 302, - 86, 304, 177, 54, 190, 83, 83, 176, 160, 305, - 306, 193, 125, 185, 81, 196, 10, 124, 159, 312, - 175, 311, 89, 50, 8, 36, 79, 228, 37, 78, - 123, 1, 98, 99, 325, 326, 101, 238, 104, 88, - 161, 330, 49, 244, 335, 336, 337, 324, 334, 48, - 47, 341, 340, 343, 342, 127, 162, 126, 59, 349, - 350, 9, 9, 103, 351, 46, 132, 45, 43, 130, - 353, 171, 240, 241, 42, 131, 242, 41, 40, 51, - 191, 352, 272, 85, 255, 358, 189, 229, 231, 233, - 234, 235, 243, 245, 248, 249, 250, 251, 252, 256, - 257, 224, 80, 230, 232, 236, 237, 239, 246, 247, - 344, 120, 55, 253, 254, 53, 77, 153, 56, 274, - 58, 22, 54, 227, 52, 116, 273, 0, 57, 0, - 277, 278, 276, 283, 285, 282, 284, 279, 280, 281, - 286, 0, 0, 0, 75, 0, 0, 0, 0, 0, - 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, - 76, 0, 0, 0, 0, 61, 62, 63, 64, 65, - 66, 67, 68, 69, 70, 71, 72, 73, 74, 228, - 0, 0, 13, 0, 0, 0, 24, 0, 30, 238, - 329, 31, 32, 0, 0, 244, 0, 0, 0, 225, - 0, 277, 278, 276, 283, 285, 282, 284, 279, 280, - 281, 286, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 240, 241, 0, 0, 242, 0, - 0, 0, 17, 77, 0, 105, 255, 0, 22, 229, - 231, 233, 234, 235, 243, 245, 248, 249, 250, 251, - 252, 256, 257, 0, 0, 230, 232, 236, 237, 239, - 246, 247, 87, 89, 0, 253, 254, 18, 19, 0, - 0, 20, 0, 98, 99, 17, 35, 101, 102, 104, - 88, 22, 11, 12, 14, 15, 16, 21, 23, 25, - 26, 27, 28, 29, 33, 34, 0, 0, 0, 13, - 0, 0, 0, 24, 103, 30, 0, 0, 31, 32, - 18, 19, 0, 0, 20, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 11, 12, 14, 15, 16, - 21, 23, 25, 26, 27, 28, 29, 33, 34, 105, - 0, 0, 13, 0, 0, 0, 24, 174, 30, 0, - 0, 31, 32, 0, 0, 0, 0, 0, 105, 0, - 0, 0, 0, 0, 0, 0, 87, 89, 90, 0, - 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - 0, 101, 102, 104, 88, 87, 89, 90, 0, 91, - 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, - 101, 102, 104, 88, 105, 0, 0, 0, 103, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 105, 0, 0, 0, 103, 0, 0, - 0, 87, 89, 90, 0, 91, 92, 93, 0, 95, + 74, 0, 0, 0, 13, 0, 0, 0, 24, 0, + 30, 0, 55, 31, 32, 53, 77, 0, 56, 330, + 0, 22, 54, 0, 0, 0, 0, 0, 57, 0, + 278, 279, 277, 284, 286, 283, 285, 280, 281, 282, + 287, 0, 0, 0, 75, 0, 0, 0, 0, 0, + 18, 19, 0, 0, 20, 0, 0, 0, 17, 77, + 76, 0, 0, 0, 22, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, + 0, 0, 13, 0, 0, 0, 24, 0, 30, 0, + 0, 31, 32, 18, 19, 0, 0, 20, 0, 0, + 0, 17, 35, 0, 0, 0, 0, 22, 11, 12, + 14, 15, 16, 21, 23, 25, 26, 27, 28, 29, + 33, 34, 0, 0, 0, 13, 0, 0, 0, 24, + 0, 30, 0, 0, 31, 32, 18, 19, 0, 0, + 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 12, 14, 15, 16, 21, 23, 25, 26, + 27, 28, 29, 33, 34, 105, 0, 0, 13, 0, + 0, 0, 24, 174, 30, 0, 0, 31, 32, 0, + 0, 0, 0, 0, 105, 0, 0, 0, 0, 0, + 0, 0, 87, 89, 90, 0, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, + 88, 87, 89, 90, 0, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, 101, 102, 104, 88, - 87, 89, 90, 0, 91, 92, 0, 0, 95, 96, - 0, 98, 99, 100, 0, 101, 102, 104, 88, 0, - 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, + 105, 0, 0, 0, 103, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 105, + 0, 0, 0, 103, 0, 0, 0, 87, 89, 90, + 0, 91, 92, 93, 0, 95, 96, 97, 98, 99, + 100, 0, 101, 
102, 104, 88, 87, 89, 90, 0, + 91, 92, 0, 0, 95, 96, 0, 98, 99, 100, + 0, 101, 102, 104, 88, 0, 0, 0, 0, 103, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 103, + 0, 0, 0, 0, 0, 0, 0, 0, 103, } var yyPact = [...]int16{ - 27, 190, 533, 533, 155, 490, -1000, -1000, -1000, 195, + 32, 106, 569, 569, 405, 526, -1000, -1000, -1000, 105, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 264, -1000, 268, -1000, 614, + -1000, -1000, -1000, -1000, -1000, 277, -1000, 297, -1000, 650, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 23, 177, -1000, -1000, 373, -1000, 373, 180, + -1000, -1000, 22, 95, -1000, -1000, 483, -1000, 483, 101, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 159, -1000, -1000, - 280, -1000, -1000, 323, -1000, 29, -1000, -56, -56, -56, - -56, -56, -56, -56, -56, -56, -56, -56, -56, -56, - -56, -56, -56, 49, 43, 192, 177, -61, -1000, 174, - 174, 8, -1000, 595, 5, -1000, 270, -1000, -1000, 131, - 187, -1000, -1000, -1000, 263, -1000, 156, -1000, 269, 373, - -1000, -43, -38, -1000, 373, 373, 373, 373, 373, 373, - 373, 373, 373, 373, 373, 373, 373, 373, 373, -1000, - 254, -1000, -1000, 151, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 226, 226, 101, -1000, -1000, -1000, -1000, 447, -1000, - -1000, 47, -1000, 614, -1000, -1000, 157, -1000, 109, -1000, - -1000, -1000, -1000, -1000, 93, -1000, -1000, -1000, -1000, -1000, - 22, 73, 60, -1000, -1000, -1000, 372, 250, 174, 174, - 174, 174, 5, 5, 491, 491, 491, 679, 660, 491, - 491, 679, 5, 5, 491, 5, 250, -1000, 68, -1000, - -1000, -1000, 186, -1000, 176, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 167, -1000, -1000, + 281, -1000, -1000, 309, -1000, 23, -1000, -50, -50, -50, + -50, -50, -50, -50, -50, -50, -50, -50, -50, -50, + -50, -50, -50, 48, 174, 336, 95, -56, -1000, 262, + 262, 324, -1000, 631, 103, -1000, 280, -1000, -1000, 274, + 241, -1000, -1000, -1000, 187, -1000, 180, -1000, 159, 483, + -1000, -57, -40, -1000, 483, 483, 483, 483, 483, 483, + 483, 483, 483, 483, 483, 483, 483, 483, 483, -1000, + 165, -1000, -1000, 94, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 40, 40, 269, -1000, -1000, -1000, -1000, 155, -1000, + -1000, 41, -1000, 650, -1000, -1000, 31, -1000, 170, -1000, + -1000, -1000, -1000, -1000, 163, -1000, -1000, -1000, -1000, -1000, + 19, 144, 140, -1000, -1000, -1000, 404, 16, 262, 262, + 262, 262, 103, 103, 251, 251, 251, 715, 696, 251, + 251, 715, 103, 103, 251, 103, 16, -1000, 24, -1000, + -1000, -1000, 265, -1000, 189, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 373, - -1000, -1000, -1000, -1000, -1000, -1000, 91, 91, 20, 91, - 124, 124, 92, 95, -1000, -1000, 285, 283, 256, 255, - 233, 231, 230, 227, 210, 209, 208, -1000, -1000, -1000, - -1000, -1000, -1000, 102, -1000, -1000, -1000, 295, -1000, 614, - -1000, -1000, -1000, 91, -1000, 18, 14, 443, -1000, -1000, - -1000, 41, 48, 226, 226, 226, 158, 158, 41, 158, - 41, -58, -1000, -1000, -1000, -1000, -1000, 91, 91, -1000, - -1000, -1000, 91, -1000, -1000, -1000, -1000, -1000, -1000, 226, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 26, -1000, 178, 
-1000, -1000, -1000, -1000, + 483, -1000, -1000, -1000, -1000, -1000, -1000, 34, 34, 18, + 34, 44, 44, 110, 38, -1000, -1000, 285, 267, 260, + 240, 236, 235, 234, 206, 188, 134, 129, -1000, -1000, + -1000, -1000, -1000, -1000, 102, -1000, -1000, -1000, 14, -1000, + 650, -1000, -1000, -1000, 34, -1000, 12, 9, 482, -1000, + -1000, -1000, 51, 81, 40, 40, 40, 97, 97, 51, + 97, 51, -73, -1000, -1000, -1000, -1000, -1000, 34, 34, + -1000, -1000, -1000, 34, -1000, -1000, -1000, -1000, -1000, -1000, + 40, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 104, -1000, 33, -1000, -1000, -1000, -1000, } var yyPgo = [...]int16{ - 0, 395, 13, 394, 6, 15, 393, 328, 390, 387, - 381, 380, 286, 294, 372, 14, 371, 10, 11, 356, - 353, 8, 352, 3, 4, 351, 2, 1, 0, 350, - 12, 5, 349, 348, 16, 157, 347, 345, 7, 344, - 341, 31, 339, 32, 338, 9, 337, 336, 335, 320, - 319, 312, 293, 301, 295, + 0, 379, 13, 378, 6, 15, 377, 344, 376, 374, + 373, 370, 198, 294, 369, 14, 368, 10, 11, 367, + 366, 8, 364, 3, 4, 363, 2, 1, 0, 362, + 12, 5, 361, 360, 18, 158, 359, 358, 7, 357, + 354, 17, 353, 31, 352, 9, 351, 350, 340, 332, + 327, 326, 314, 321, 302, } var yyR1 = [...]int8{ @@ -542,25 +547,25 @@ var yyR1 = [...]int8{ 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 35, 37, 37, 47, 47, 42, 42, 42, 42, 17, 17, 17, 17, 16, 16, 16, 4, 4, - 39, 41, 41, 40, 40, 40, 48, 46, 46, 46, - 32, 32, 32, 9, 9, 44, 50, 50, 50, 50, - 50, 50, 51, 52, 52, 52, 43, 43, 43, 1, - 1, 1, 2, 2, 2, 2, 2, 2, 2, 13, - 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 4, 39, 41, 41, 40, 40, 40, 48, 46, 46, + 46, 32, 32, 32, 9, 9, 44, 50, 50, 50, + 50, 50, 50, 51, 52, 52, 52, 43, 43, 43, + 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, + 13, 13, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 12, 12, 12, 12, 14, - 14, 14, 15, 15, 15, 15, 54, 20, 20, 20, - 20, 19, 19, 19, 19, 19, 19, 19, 19, 19, - 29, 29, 29, 21, 21, 21, 21, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 24, 24, 25, 25, 25, 11, 11, 11, 11, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, + 14, 14, 14, 15, 15, 15, 15, 54, 20, 20, + 20, 20, 19, 19, 19, 19, 19, 19, 19, 19, + 19, 29, 29, 29, 21, 21, 21, 21, 22, 22, + 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 23, 23, 24, 24, 25, 25, 25, 11, 11, 11, + 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 8, 8, 5, 5, 5, 5, 45, - 45, 28, 28, 30, 30, 31, 31, 27, 26, 26, - 49, 10, 18, 18, + 6, 6, 6, 6, 8, 8, 5, 5, 5, 5, + 45, 45, 28, 28, 30, 30, 31, 31, 27, 26, + 26, 49, 10, 18, 18, } var yyR2 = [...]int8{ @@ -570,25 +575,25 @@ var yyR2 = [...]int8{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 1, 0, 1, 3, 3, 1, 1, 3, 3, 3, 4, 2, 1, 3, 1, 2, 1, 1, - 2, 3, 2, 3, 1, 2, 3, 3, 4, 3, - 3, 5, 3, 1, 1, 4, 6, 5, 6, 5, - 4, 3, 2, 2, 1, 1, 3, 4, 2, 3, - 1, 2, 3, 3, 1, 3, 3, 2, 1, 2, + 1, 2, 3, 2, 3, 1, 2, 3, 3, 4, + 3, 3, 5, 3, 1, 1, 4, 6, 5, 6, + 5, 4, 3, 2, 2, 1, 1, 3, 4, 2, + 3, 1, 2, 3, 3, 1, 3, 3, 2, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 3, 4, 2, 0, 3, - 1, 2, 3, 3, 2, 1, 2, 0, 3, 2, - 1, 1, 3, 1, 3, 4, 1, 3, 5, 5, - 1, 1, 1, 4, 3, 3, 2, 3, 1, 2, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 3, 4, 2, 0, + 3, 1, 2, 3, 3, 2, 1, 2, 0, 3, + 2, 1, 1, 3, 1, 3, 4, 1, 3, 5, + 5, 1, 1, 1, 4, 3, 3, 2, 3, 1, + 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 4, 3, 3, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, - 1, 1, 0, 1, + 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, + 1, 1, 1, 0, 1, } var yyChk = [...]int16{ @@ -614,59 +619,59 @@ var yyChk = [...]int16{ 5, -29, -21, 12, -28, -30, 16, -38, 82, 84, 80, 81, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -38, -45, 15, -28, - -28, 21, 6, 2, -16, 22, -4, -6, 2, 62, - 78, 63, 79, 64, 65, 66, 80, 81, 12, 82, - 47, 48, 51, 67, 18, 68, 83, 84, 69, 70, - 71, 72, 73, 88, 89, 59, 74, 75, 22, 7, - 20, -2, 25, 2, 25, 2, 26, 26, -30, 26, - 41, 57, -22, 24, 17, -23, 30, 28, 29, 35, - 36, 37, 33, 31, 34, 32, 38, -17, -17, -18, - -17, -18, 22, -45, 21, 2, 22, 7, 2, -38, - -27, 19, -27, 26, -27, -21, -21, 24, 17, 2, - 17, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 21, 2, 22, -4, -27, 26, 26, 17, - -23, -26, 57, -27, -31, -28, -28, -28, -24, 14, - -24, -26, -24, -26, -11, 92, 93, 94, 95, -27, - -27, -27, -25, -28, 24, 21, 2, 21, -28, + -28, 21, 6, 2, -16, 22, -4, -6, 25, 2, + 62, 78, 63, 79, 64, 65, 66, 80, 81, 12, + 82, 47, 48, 51, 67, 18, 68, 83, 84, 69, + 70, 71, 72, 73, 88, 89, 59, 74, 75, 22, + 7, 20, -2, 25, 2, 25, 2, 26, 26, -30, + 26, 41, 57, -22, 24, 17, -23, 30, 28, 29, + 35, 36, 37, 33, 31, 34, 32, 38, -17, -17, + -18, -17, -18, 22, -45, 21, 2, 22, 7, 2, + -38, -27, 19, -27, 26, -27, -21, -21, 24, 17, + 2, 17, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 21, 2, 22, -4, -27, 26, 26, + 17, -23, -26, 57, -27, -31, -28, -28, -28, -24, + 14, -24, -26, -24, -26, -11, 92, 93, 94, 95, + -27, -27, -27, -25, -28, 24, 21, 2, 21, -28, } var yyDef = [...]int16{ - 0, -2, 128, 128, 0, 0, 7, 6, 1, 128, - 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, - 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, - 120, 121, 122, 123, 124, 0, 2, -2, 3, 4, + 0, -2, 129, 129, 0, 0, 7, 6, 1, 129, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 0, 2, -2, 3, 4, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, - 18, 19, 0, 107, 229, 230, 0, 240, 0, 84, - 85, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, 223, 224, 0, 5, 99, - 0, 127, 130, 0, 135, 136, 140, 43, 43, 43, + 18, 19, 0, 108, 230, 231, 0, 241, 0, 85, + 86, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -2, -2, 224, 225, 0, 5, 100, + 0, 128, 131, 0, 136, 137, 141, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 0, 0, 0, 0, 22, 23, 0, - 0, 0, 60, 0, 82, 83, 0, 88, 90, 0, - 94, 98, 241, 125, 0, 131, 0, 134, 139, 0, + 0, 0, 61, 0, 83, 84, 0, 89, 91, 0, + 95, 99, 242, 126, 0, 132, 0, 135, 140, 0, 42, 47, 48, 44, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, - 0, 69, 70, 0, 72, 235, 236, 73, 74, 231, - 232, 0, 0, 0, 81, 20, 21, 24, 0, 54, - 25, 0, 62, 64, 66, 86, 0, 91, 0, 97, - 225, 226, 227, 228, 0, 126, 129, 132, 133, 138, - 141, 143, 146, 150, 151, 152, 0, 26, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, + 0, 70, 71, 0, 73, 236, 237, 74, 75, 232, + 233, 0, 0, 0, 82, 20, 21, 24, 0, 54, + 25, 0, 63, 65, 67, 87, 0, 92, 0, 98, + 226, 227, 228, 229, 0, 127, 130, 133, 134, 139, + 142, 144, 147, 151, 152, 153, 0, 26, 0, 0, -2, -2, 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 68, 0, 233, - 234, 75, 0, 80, 0, 53, 56, 58, 59, 194, + 35, 36, 37, 38, 39, 40, 41, 69, 0, 234, + 235, 76, 0, 81, 0, 53, 56, 58, 59, 60, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, - 215, 216, 217, 218, 219, 220, 221, 222, 61, 65, - 87, 89, 92, 
96, 93, 95, 0, 0, 0, 0, - 0, 0, 0, 0, 156, 158, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 45, 46, 49, - 243, 50, 71, 0, 77, 79, 51, 0, 57, 63, - 142, 237, 144, 0, 147, 0, 0, 0, 154, 159, - 155, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 76, 78, 52, 55, 145, 0, 0, 153, - 157, 160, 0, 239, 161, 162, 163, 164, 165, 0, - 166, 167, 168, 169, 170, 176, 177, 178, 179, 148, - 149, 238, 0, 174, 0, 172, 175, 171, 173, + 215, 216, 217, 218, 219, 220, 221, 222, 223, 62, + 66, 88, 90, 93, 97, 94, 96, 0, 0, 0, + 0, 0, 0, 0, 0, 157, 159, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 45, 46, + 49, 244, 50, 72, 0, 78, 80, 51, 0, 57, + 64, 143, 238, 145, 0, 148, 0, 0, 0, 155, + 160, 156, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 77, 79, 52, 55, 146, 0, 0, + 154, 158, 161, 0, 240, 162, 163, 164, 165, 166, + 0, 167, 168, 169, 170, 171, 177, 178, 179, 180, + 149, 150, 239, 0, 175, 0, 173, 176, 172, 174, } var yyTok1 = [...]int8{ @@ -1249,18 +1254,28 @@ yydefault: case 58: yyDollar = yyS[yypt-1 : yypt+1] { - if !isLabel(yyDollar[1].item.Val) { + if !model.LabelName(yyDollar[1].item.Val).IsValid() { yylex.(*parser).unexpected("grouping opts", "label") } yyVAL.item = yyDollar[1].item } case 59: + yyDollar = yyS[yypt-1 : yypt+1] + { + if !model.LabelName(yylex.(*parser).unquoteString(yyDollar[1].item.Val)).IsValid() { + yylex.(*parser).unexpected("grouping opts", "label") + } + yyVAL.item = yyDollar[1].item + yyVAL.item.Pos++ + yyVAL.item.Val = yylex.(*parser).unquoteString(yyVAL.item.Val) + } + case 60: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } - case 60: + case 61: yyDollar = yyS[yypt-2 : yypt+1] { fn, exist := getFunction(yyDollar[1].item.Val, yylex.(*parser).functions) @@ -1279,38 +1294,38 @@ yydefault: }, } } - case 61: + case 62: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = yyDollar[2].node } - case 62: + case 63: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = Expressions{} } - case 63: + case 64: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } - case 64: + case 65: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } - case 65: + case 66: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } - case 66: + case 67: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } - case 67: + case 68: yyDollar = yyS[yypt-3 : yypt+1] { numLit, _ := yyDollar[3].node.(*NumberLiteral) @@ -1318,7 +1333,7 @@ yydefault: yylex.(*parser).addOffset(yyDollar[1].node, dur) yyVAL.node = yyDollar[1].node } - case 68: + case 69: yyDollar = yyS[yypt-4 : yypt+1] { numLit, _ := yyDollar[4].node.(*NumberLiteral) @@ -1326,31 +1341,31 @@ yydefault: yylex.(*parser).addOffset(yyDollar[1].node, -dur) yyVAL.node = yyDollar[1].node } - case 69: + case 70: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("offset", "number or duration") yyVAL.node = yyDollar[1].node } - case 70: + case 71: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) yyVAL.node = yyDollar[1].node } - case 71: + case 72: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).setAtModifierPreprocessor(yyDollar[1].node, yyDollar[3].item) yyVAL.node = yyDollar[1].node } - case 72: + case 73: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("@", "timestamp") 
yyVAL.node = yyDollar[1].node } - case 75: + case 76: yyDollar = yyS[yypt-4 : yypt+1] { var errMsg string @@ -1375,7 +1390,7 @@ yydefault: EndPos: yylex.(*parser).lastClosing, } } - case 76: + case 77: yyDollar = yyS[yypt-6 : yypt+1] { numLitRange, _ := yyDollar[3].node.(*NumberLiteral) @@ -1387,7 +1402,7 @@ yydefault: EndPos: yyDollar[6].item.Pos + 1, } } - case 77: + case 78: yyDollar = yyS[yypt-5 : yypt+1] { numLitRange, _ := yyDollar[3].node.(*NumberLiteral) @@ -1398,31 +1413,31 @@ yydefault: EndPos: yyDollar[5].item.Pos + 1, } } - case 78: + case 79: yyDollar = yyS[yypt-6 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 79: + case 80: yyDollar = yyS[yypt-5 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number or duration or \"]\"") yyVAL.node = yyDollar[1].node } - case 80: + case 81: yyDollar = yyS[yypt-4 : yypt+1] { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 81: + case 82: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("subquery selector", "number or duration") yyVAL.node = yyDollar[1].node } - case 82: + case 83: yyDollar = yyS[yypt-2 : yypt+1] { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { @@ -1435,7 +1450,7 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 83: + case 84: yyDollar = yyS[yypt-2 : yypt+1] { vs := yyDollar[2].node.(*VectorSelector) @@ -1444,7 +1459,7 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 84: + case 85: yyDollar = yyS[yypt-1 : yypt+1] { vs := &VectorSelector{ @@ -1455,14 +1470,14 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 85: + case 86: yyDollar = yyS[yypt-1 : yypt+1] { vs := yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 86: + case 87: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1470,7 +1485,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 87: + case 88: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1478,7 +1493,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 88: + case 89: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.node = &VectorSelector{ @@ -1486,7 +1501,7 @@ yydefault: PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 89: + case 90: yyDollar = yyS[yypt-3 : yypt+1] { if yyDollar[1].matchers != nil { @@ -1495,38 +1510,32 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 90: + case 91: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 91: + case 92: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 92: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) - } case 93: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } case 94: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) + } + case 95: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.matcher = yylex.(*parser).newMetricNameMatcher(yyDollar[1].item) } - case 95: - yyDollar = yyS[yypt-3 : 
yypt+1] - { - yylex.(*parser).unexpected("label matching", "string") - yyVAL.matcher = nil - } case 96: yyDollar = yyS[yypt-3 : yypt+1] { @@ -1534,89 +1543,95 @@ yydefault: yyVAL.matcher = nil } case 97: + yyDollar = yyS[yypt-3 : yypt+1] + { + yylex.(*parser).unexpected("label matching", "string") + yyVAL.matcher = nil + } + case 98: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 98: + case 99: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 99: + case 100: yyDollar = yyS[yypt-2 : yypt+1] { b := labels.NewBuilder(yyDollar[2].labels) b.Set(labels.MetricName, yyDollar[1].item.Val) yyVAL.labels = b.Labels() } - case 100: + case 101: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.labels = yyDollar[1].labels } - case 125: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.labels = labels.New(yyDollar[2].lblList...) - } case 126: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.labels = labels.New(yyDollar[2].lblList...) } case 127: + yyDollar = yyS[yypt-4 : yypt+1] + { + yyVAL.labels = labels.New(yyDollar[2].lblList...) + } + case 128: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.labels = labels.New() } - case 128: + case 129: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.labels = labels.New() } - case 129: + case 130: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.lblList = append(yyDollar[1].lblList, yyDollar[3].label) } - case 130: + case 131: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.lblList = []labels.Label{yyDollar[1].label} } - case 131: + case 132: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.lblList = yyDollar[1].lblList } - case 132: + case 133: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 133: + case 134: yyDollar = yyS[yypt-3 : yypt+1] { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 134: + case 135: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 135: + case 136: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 136: + case 137: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).generatedParserResult = &seriesDescription{ @@ -1624,33 +1639,33 @@ yydefault: values: yyDollar[2].series, } } - case 137: + case 138: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.series = []SequenceValue{} } - case 138: + case 139: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 139: + case 140: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.series = yyDollar[1].series } - case 140: + case 141: yyDollar = yyS[yypt-1 : yypt+1] { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 141: + case 142: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 142: + case 143: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1658,12 +1673,12 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 143: + case 144: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 144: + case 145: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1672,7 +1687,7 @@ yydefault: yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 145: + case 146: yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1682,12 +1697,12 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 146: + case 147: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.series = []SequenceValue{{Histogram: yyDollar[1].histogram}} } - case 147: + case 148: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.series = []SequenceValue{} @@ -1697,7 +1712,7 @@ yydefault: //$1 += $2 } } - case 148: + case 149: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsIncreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1706,7 +1721,7 @@ yydefault: } yyVAL.series = val } - case 149: + case 150: yyDollar = yyS[yypt-5 : yypt+1] { val, err := yylex.(*parser).histogramsDecreaseSeries(yyDollar[1].histogram, yyDollar[3].histogram, yyDollar[5].uint) @@ -1715,7 +1730,7 @@ yydefault: } yyVAL.series = val } - case 150: + case 151: yyDollar = yyS[yypt-1 : yypt+1] { if yyDollar[1].item.Val != "stale" { @@ -1723,130 +1738,130 @@ yydefault: } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 153: - yyDollar = yyS[yypt-4 : yypt+1] - { - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) - } case 154: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 155: yyDollar = yyS[yypt-3 : yypt+1] { - m := yylex.(*parser).newMap() - yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&yyDollar[2].descriptors) } case 156: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { m := yylex.(*parser).newMap() yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) } case 157: + yyDollar = yyS[yypt-2 : yypt+1] + { + m := yylex.(*parser).newMap() + yyVAL.histogram = yylex.(*parser).buildHistogramFromMap(&m) + } + case 158: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = *(yylex.(*parser).mergeMaps(&yyDollar[1].descriptors, &yyDollar[3].descriptors)) } - case 158: + case 159: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.descriptors = yyDollar[1].descriptors } - case 159: + case 160: yyDollar = yyS[yypt-2 : yypt+1] { yylex.(*parser).unexpected("histogram description", "histogram description key, e.g. 
buckets:[5 10 7]") } - case 160: - yyDollar = yyS[yypt-3 : yypt+1] - { - yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["schema"] = yyDollar[3].int - } case 161: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["sum"] = yyDollar[3].float + yyVAL.descriptors["schema"] = yyDollar[3].int } case 162: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["count"] = yyDollar[3].float + yyVAL.descriptors["sum"] = yyDollar[3].float } case 163: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket"] = yyDollar[3].float + yyVAL.descriptors["count"] = yyDollar[3].float } case 164: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float + yyVAL.descriptors["z_bucket"] = yyDollar[3].float } case 165: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set + yyVAL.descriptors["z_bucket_w"] = yyDollar[3].float } case 166: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["custom_values"] = yyDollar[3].bucket_set } case 167: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["offset"] = yyDollar[3].int + yyVAL.descriptors["buckets"] = yyDollar[3].bucket_set } case 168: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set + yyVAL.descriptors["offset"] = yyDollar[3].int } case 169: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["n_offset"] = yyDollar[3].int + yyVAL.descriptors["n_buckets"] = yyDollar[3].bucket_set } case 170: yyDollar = yyS[yypt-3 : yypt+1] { yyVAL.descriptors = yylex.(*parser).newMap() - yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item + yyVAL.descriptors["n_offset"] = yyDollar[3].int } case 171: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = yyDollar[2].bucket_set + yyVAL.descriptors = yylex.(*parser).newMap() + yyVAL.descriptors["counter_reset_hint"] = yyDollar[3].item } case 172: - yyDollar = yyS[yypt-3 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] { yyVAL.bucket_set = yyDollar[2].bucket_set } case 173: yyDollar = yyS[yypt-3 : yypt+1] { - yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + yyVAL.bucket_set = yyDollar[2].bucket_set } case 174: + yyDollar = yyS[yypt-3 : yypt+1] + { + yyVAL.bucket_set = append(yyDollar[1].bucket_set, yyDollar[3].float) + } + case 175: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.bucket_set = []float64{yyDollar[1].float} } - case 229: + case 230: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &NumberLiteral{ @@ -1854,7 +1869,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 230: + case 231: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1868,12 +1883,12 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 231: + case 232: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 232: + case 233: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1884,17 +1899,17 @@ yydefault: } yyVAL.float = dur.Seconds() } - case 233: + case 234: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = yyDollar[2].float } - 
case 234: + case 235: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.float = -yyDollar[2].float } - case 237: + case 238: yyDollar = yyS[yypt-1 : yypt+1] { var err error @@ -1903,17 +1918,17 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 238: + case 239: yyDollar = yyS[yypt-2 : yypt+1] { yyVAL.int = -int64(yyDollar[2].uint) } - case 239: + case 240: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.int = int64(yyDollar[1].uint) } - case 240: + case 241: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.node = &StringLiteral{ @@ -1921,7 +1936,7 @@ yydefault: PosRange: yyDollar[1].item.PositionRange(), } } - case 241: + case 242: yyDollar = yyS[yypt-1 : yypt+1] { yyVAL.item = Item{ @@ -1930,7 +1945,7 @@ yydefault: Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), } } - case 242: + case 243: yyDollar = yyS[yypt-0 : yypt+1] { yyVAL.strings = nil diff --git a/promql/parser/lex.go b/promql/parser/lex.go index 99b4b4644..d031e8330 100644 --- a/promql/parser/lex.go +++ b/promql/parser/lex.go @@ -617,6 +617,16 @@ func lexBuckets(l *Lexer) stateFn { l.bracketOpen = false l.emit(RIGHT_BRACKET) return lexHistogram + case isAlpha(r): + // Current word is Inf or NaN. + word := l.input[l.start:l.pos] + if desc, ok := key[strings.ToLower(word)]; ok { + if desc == NUMBER { + l.emit(desc) + return lexStatements + } + } + return lexBuckets default: return l.errorf("invalid character in buckets description: %q", r) } @@ -727,23 +737,23 @@ func lexValueSequence(l *Lexer) stateFn { // was only modified to integrate with our lexer. func lexEscape(l *Lexer) stateFn { var n int - var base, max uint32 + var base, maxVal uint32 ch := l.next() switch ch { case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', l.stringOpen: return lexString case '0', '1', '2', '3', '4', '5', '6', '7': - n, base, max = 3, 8, 255 + n, base, maxVal = 3, 8, 255 case 'x': ch = l.next() - n, base, max = 2, 16, 255 + n, base, maxVal = 2, 16, 255 case 'u': ch = l.next() - n, base, max = 4, 16, unicode.MaxRune + n, base, maxVal = 4, 16, unicode.MaxRune case 'U': ch = l.next() - n, base, max = 8, 16, unicode.MaxRune + n, base, maxVal = 8, 16, unicode.MaxRune case eof: l.errorf("escape sequence not terminated") return lexString @@ -772,7 +782,7 @@ func lexEscape(l *Lexer) stateFn { } } - if x > max || 0xD800 <= x && x < 0xE000 { + if x > maxVal || 0xD800 <= x && x < 0xE000 { l.errorf("escape sequence is an invalid Unicode code point") } return lexString @@ -1059,16 +1069,3 @@ func isDigit(r rune) bool { func isAlpha(r rune) bool { return r == '_' || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') } - -// isLabel reports whether the string can be used as label. 
-func isLabel(s string) bool { - if len(s) == 0 || !isAlpha(rune(s[0])) { - return false - } - for _, c := range s[1:] { - if !isAlphaNumeric(c) { - return false - } - } - return true -} diff --git a/promql/parser/lex_test.go b/promql/parser/lex_test.go index ac9aa2762..c5475a8b9 100644 --- a/promql/parser/lex_test.go +++ b/promql/parser/lex_test.go @@ -639,6 +639,29 @@ var tests = []struct { }, seriesDesc: true, }, + { + input: `{} {{buckets: [Inf NaN] schema:1}}`, + expected: []Item{ + {LEFT_BRACE, 0, `{`}, + {RIGHT_BRACE, 1, `}`}, + {SPACE, 2, ` `}, + {OPEN_HIST, 3, `{{`}, + {BUCKETS_DESC, 5, `buckets`}, + {COLON, 12, `:`}, + {SPACE, 13, ` `}, + {LEFT_BRACKET, 14, `[`}, + {NUMBER, 15, `Inf`}, + {SPACE, 18, ` `}, + {NUMBER, 19, `NaN`}, + {RIGHT_BRACKET, 22, `]`}, + {SPACE, 23, ` `}, + {SCHEMA_DESC, 24, `schema`}, + {COLON, 30, `:`}, + {NUMBER, 31, `1`}, + {CLOSE_HIST, 32, `}}`}, + }, + seriesDesc: true, + }, { // Series with sum as -Inf and count as NaN. input: `{} {{buckets: [5 10 7] sum:Inf count:NaN}}`, expected: []Item{ diff --git a/promql/parser/parse_test.go b/promql/parser/parse_test.go index 3c679e5b0..37748323c 100644 --- a/promql/parser/parse_test.go +++ b/promql/parser/parse_test.go @@ -2397,6 +2397,51 @@ var testExpr = []struct { }, }, }, + { + input: `sum by ("foo")({"some.metric"})`, + expected: &AggregateExpr{ + Op: SUM, + Expr: &VectorSelector{ + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"), + }, + PosRange: posrange.PositionRange{ + Start: 15, + End: 30, + }, + }, + Grouping: []string{"foo"}, + PosRange: posrange.PositionRange{ + Start: 0, + End: 31, + }, + }, + }, + { + input: `sum by ("foo)(some_metric{})`, + fail: true, + errMsg: "unterminated quoted string", + }, + { + input: `sum by ("foo", bar, 'baz')({"some.metric"})`, + expected: &AggregateExpr{ + Op: SUM, + Expr: &VectorSelector{ + LabelMatchers: []*labels.Matcher{ + MustLabelMatcher(labels.MatchEqual, model.MetricNameLabel, "some.metric"), + }, + PosRange: posrange.PositionRange{ + Start: 27, + End: 42, + }, + }, + Grouping: []string{"foo", "bar", "baz"}, + PosRange: posrange.PositionRange{ + Start: 0, + End: 43, + }, + }, + }, { input: "avg by (foo)(some_metric)", expected: &AggregateExpr{ @@ -3844,6 +3889,7 @@ func readable(s string) string { } func TestParseExpressions(t *testing.T) { + model.NameValidationScheme = model.UTF8Validation for _, test := range testExpr { t.Run(readable(test.input), func(t *testing.T) { expr, err := ParseExpr(test.input) diff --git a/promql/parser/printer.go b/promql/parser/printer.go index f3bdefdeb..63b195082 100644 --- a/promql/parser/printer.go +++ b/promql/parser/printer.go @@ -72,27 +72,48 @@ func (node *AggregateExpr) String() string { return aggrString } +func (node *AggregateExpr) ShortString() string { + aggrString := node.getAggOpStr() + return aggrString +} + func (node *AggregateExpr) getAggOpStr() string { aggrString := node.Op.String() switch { case node.Without: - aggrString += fmt.Sprintf(" without (%s) ", strings.Join(node.Grouping, ", ")) + aggrString += fmt.Sprintf(" without (%s) ", joinLabels(node.Grouping)) case len(node.Grouping) > 0: - aggrString += fmt.Sprintf(" by (%s) ", strings.Join(node.Grouping, ", ")) + aggrString += fmt.Sprintf(" by (%s) ", joinLabels(node.Grouping)) } return aggrString } -func (node *BinaryExpr) String() string { - returnBool := "" - if node.ReturnBool { - returnBool = " bool" +func joinLabels(ss []string) string { + for i, s := range ss { + // If the label is 
already quoted, don't quote it again. + if s[0] != '"' && s[0] != '\'' && s[0] != '`' && !model.IsValidLegacyMetricName(string(model.LabelValue(s))) { + ss[i] = fmt.Sprintf("\"%s\"", s) + } } + return strings.Join(ss, ", ") +} +func (node *BinaryExpr) returnBool() string { + if node.ReturnBool { + return " bool" + } + return "" +} + +func (node *BinaryExpr) String() string { matching := node.getMatchingStr() - return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, returnBool, matching, node.RHS) + return fmt.Sprintf("%s %s%s%s %s", node.LHS, node.Op, node.returnBool(), matching, node.RHS) +} + +func (node *BinaryExpr) ShortString() string { + return fmt.Sprintf("%s%s%s", node.Op, node.returnBool(), node.getMatchingStr()) } func (node *BinaryExpr) getMatchingStr() string { @@ -120,9 +141,13 @@ func (node *Call) String() string { return fmt.Sprintf("%s(%s)", node.Func.Name, node.Args) } -func (node *MatrixSelector) String() string { +func (node *Call) ShortString() string { + return node.Func.Name +} + +func (node *MatrixSelector) atOffset() (string, string) { // Copy the Vector selector before changing the offset - vecSelector := *node.VectorSelector.(*VectorSelector) + vecSelector := node.VectorSelector.(*VectorSelector) offset := "" switch { case vecSelector.OriginalOffset > time.Duration(0): @@ -139,7 +164,13 @@ func (node *MatrixSelector) String() string { case vecSelector.StartOrEnd == END: at = " @ end()" } + return at, offset +} +func (node *MatrixSelector) String() string { + at, offset := node.atOffset() + // Copy the Vector selector before changing the offset + vecSelector := *node.VectorSelector.(*VectorSelector) // Do not print the @ and offset twice. offsetVal, atVal, preproc := vecSelector.OriginalOffset, vecSelector.Timestamp, vecSelector.StartOrEnd vecSelector.OriginalOffset = 0 @@ -153,10 +184,19 @@ func (node *MatrixSelector) String() string { return str } +func (node *MatrixSelector) ShortString() string { + at, offset := node.atOffset() + return fmt.Sprintf("[%s]%s%s", model.Duration(node.Range), at, offset) +} + func (node *SubqueryExpr) String() string { return fmt.Sprintf("%s%s", node.Expr.String(), node.getSubqueryTimeSuffix()) } +func (node *SubqueryExpr) ShortString() string { + return node.getSubqueryTimeSuffix() +} + // getSubqueryTimeSuffix returns the '[:] @ offset ' suffix of the subquery. 
func (node *SubqueryExpr) getSubqueryTimeSuffix() string { step := "" @@ -198,6 +238,10 @@ func (node *UnaryExpr) String() string { return fmt.Sprintf("%s%s", node.Op, node.Expr) } +func (node *UnaryExpr) ShortString() string { + return node.Op.String() +} + func (node *VectorSelector) String() string { var labelStrings []string if len(node.LabelMatchers) > 1 { diff --git a/promql/parser/printer_test.go b/promql/parser/printer_test.go index d2e301a88..0a557ad59 100644 --- a/promql/parser/printer_test.go +++ b/promql/parser/printer_test.go @@ -16,6 +16,7 @@ package parser import ( "testing" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/model/labels" @@ -44,6 +45,14 @@ func TestExprString(t *testing.T) { in: `sum without(instance) (task:errors:rate10s{job="s"})`, out: `sum without (instance) (task:errors:rate10s{job="s"})`, }, + { + in: `sum by("foo.bar") (task:errors:rate10s{job="s"})`, + out: `sum by ("foo.bar") (task:errors:rate10s{job="s"})`, + }, + { + in: `sum without("foo.bar") (task:errors:rate10s{job="s"})`, + out: `sum without ("foo.bar") (task:errors:rate10s{job="s"})`, + }, { in: `topk(5, task:errors:rate10s{job="s"})`, }, @@ -157,6 +166,8 @@ func TestExprString(t *testing.T) { }, } + model.NameValidationScheme = model.UTF8Validation + for _, test := range inputs { expr, err := ParseExpr(test.in) require.NoError(t, err) diff --git a/promql/promql_test.go b/promql/promql_test.go index a423f90ee..345ecab5e 100644 --- a/promql/promql_test.go +++ b/promql/promql_test.go @@ -28,12 +28,12 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) -func newTestEngine() *promql.Engine { - return promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery) +func newTestEngine(t *testing.T) *promql.Engine { + return promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery) } func TestEvaluations(t *testing.T) { - promqltest.RunBuiltinTests(t, newTestEngine()) + promqltest.RunBuiltinTests(t, newTestEngine(t)) } // Run a lot of queries at the same time, to check for race conditions. @@ -48,7 +48,7 @@ func TestConcurrentRangeQueries(t *testing.T) { } // Enable experimental functions testing parser.EnableExperimentalFunctions = true - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) const interval = 10000 // 10s interval. // A day of data plus 10k steps. diff --git a/promql/promqltest/test.go b/promql/promqltest/test.go index 83137e661..ff709e442 100644 --- a/promql/promqltest/test.go +++ b/promql/promqltest/test.go @@ -79,8 +79,9 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage { return test.storage } -func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine { - return promql.NewEngine(promql.EngineOpts{ +// NewTestEngine creates a promql.Engine with enablePerStepStats, lookbackDelta and maxSamples, and returns it. +func NewTestEngine(tb testing.TB, enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine { + return NewTestEngineWithOpts(tb, promql.EngineOpts{ Logger: nil, Reg: nil, MaxSamples: maxSamples, @@ -90,9 +91,20 @@ func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamp EnableNegativeOffset: true, EnablePerStepStats: enablePerStepStats, LookbackDelta: lookbackDelta, + EnableDelayedNameRemoval: true, }) } +// NewTestEngineWithOpts creates a promql.Engine with opts and returns it. 
+func NewTestEngineWithOpts(tb testing.TB, opts promql.EngineOpts) *promql.Engine { + tb.Helper() + ng := promql.NewEngine(opts) + tb.Cleanup(func() { + require.NoError(tb, ng.Close()) + }) + return ng +} + // RunBuiltinTests runs an acceptance test suite against the provided engine. func RunBuiltinTests(t TBRun, engine promql.QueryEngine) { t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) @@ -650,8 +662,9 @@ type evalCmd struct { expectedFailMessage string expectedFailRegexp *regexp.Regexp - metrics map[uint64]labels.Labels - expected map[uint64]entry + metrics map[uint64]labels.Labels + expectScalar bool + expected map[uint64]entry } type entry struct { @@ -695,12 +708,15 @@ func (ev *evalCmd) String() string { // expect adds a sequence of values to the set of expected // results for the query. func (ev *evalCmd) expect(pos int, vals ...parser.SequenceValue) { + ev.expectScalar = true ev.expected[0] = entry{pos: pos, vals: vals} } // expectMetric adds a new metric with a sequence of values to the set of expected // results for the query. func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.SequenceValue) { + ev.expectScalar = false + h := m.Hash() ev.metrics[h] = m ev.expected[h] = entry{pos: pos, vals: vals} @@ -714,6 +730,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error { return fmt.Errorf("expected ordered result, but query returned a matrix") } + if ev.expectScalar { + return fmt.Errorf("expected scalar result, but got matrix %s", val.String()) + } + if err := assertMatrixSorted(val); err != nil { return err } @@ -769,7 +789,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s)) } - if !actual.H.Equals(expected.H.Compact(0)) { + if !compareNativeHistogram(expected.H.Compact(0), actual.H.Compact(0)) { return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s)) } } @@ -782,6 +802,10 @@ func (ev *evalCmd) compareResult(result parser.Value) error { } case promql.Vector: + if ev.expectScalar { + return fmt.Errorf("expected scalar result, but got vector %s", val.String()) + } + seen := map[uint64]bool{} for pos, v := range val { fp := v.Metric.Hash() @@ -804,7 +828,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error { if expH != nil && v.H == nil { return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F) } - if expH != nil && !expH.Compact(0).Equals(v.H) { + if expH != nil && !compareNativeHistogram(expH.Compact(0), v.H.Compact(0)) { return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H)) } if !almost.Equal(exp0.Value, v.F, defaultEpsilon) { @@ -820,15 +844,15 @@ func (ev *evalCmd) compareResult(result parser.Value) error { } case promql.Scalar: - if len(ev.expected) != 1 { - return fmt.Errorf("expected vector result, but got scalar %s", val.String()) + if !ev.expectScalar { + return fmt.Errorf("expected vector or matrix result, but got %s", val.String()) } exp0 := ev.expected[0].vals[0] if exp0.Histogram != nil { - return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String()) + return fmt.Errorf("expected histogram %v but got 
%s", exp0.Histogram.TestExpression(), val.String()) } if !almost.Equal(exp0.Value, val.V, defaultEpsilon) { - return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value) + return fmt.Errorf("expected scalar %v but got %v", exp0.Value, val.V) } default: @@ -837,6 +861,121 @@ func (ev *evalCmd) compareResult(result parser.Value) error { return nil } +// compareNativeHistogram is helper function to compare two native histograms +// which can tolerate some differ in the field of float type, such as Count, Sum. +func compareNativeHistogram(exp, cur *histogram.FloatHistogram) bool { + if exp == nil || cur == nil { + return false + } + + if exp.Schema != cur.Schema || + !almost.Equal(exp.Count, cur.Count, defaultEpsilon) || + !almost.Equal(exp.Sum, cur.Sum, defaultEpsilon) { + return false + } + + if exp.UsesCustomBuckets() { + if !histogram.FloatBucketsMatch(exp.CustomValues, cur.CustomValues) { + return false + } + } + + if exp.ZeroThreshold != cur.ZeroThreshold || + !almost.Equal(exp.ZeroCount, cur.ZeroCount, defaultEpsilon) { + return false + } + + if !spansMatch(exp.NegativeSpans, cur.NegativeSpans) { + return false + } + if !floatBucketsMatch(exp.NegativeBuckets, cur.NegativeBuckets) { + return false + } + + if !spansMatch(exp.PositiveSpans, cur.PositiveSpans) { + return false + } + if !floatBucketsMatch(exp.PositiveBuckets, cur.PositiveBuckets) { + return false + } + + return true +} + +func floatBucketsMatch(b1, b2 []float64) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if !almost.Equal(b, b2[i], defaultEpsilon) { + return false + } + } + return true +} + +func spansMatch(s1, s2 []histogram.Span) bool { + if len(s1) == 0 && len(s2) == 0 { + return true + } + + s1idx, s2idx := 0, 0 + for { + if s1idx >= len(s1) { + return allEmptySpans(s2[s2idx:]) + } + if s2idx >= len(s2) { + return allEmptySpans(s1[s1idx:]) + } + + currS1, currS2 := s1[s1idx], s2[s2idx] + s1idx++ + s2idx++ + if currS1.Length == 0 { + // This span is zero length, so we add consecutive such spans + // until we find a non-zero span. + for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ { + currS1.Offset += s1[s1idx].Offset + } + if s1idx < len(s1) { + currS1.Offset += s1[s1idx].Offset + currS1.Length = s1[s1idx].Length + s1idx++ + } + } + if currS2.Length == 0 { + // This span is zero length, so we add consecutive such spans + // until we find a non-zero span. + for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ { + currS2.Offset += s2[s2idx].Offset + } + if s2idx < len(s2) { + currS2.Offset += s2[s2idx].Offset + currS2.Length = s2[s2idx].Length + s2idx++ + } + } + + if currS1.Length == 0 && currS2.Length == 0 { + // The last spans of both set are zero length. Previous spans match. 
+ return true + } + + if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length { + return false + } + } +} + +func allEmptySpans(s []histogram.Span) bool { + for _, ss := range s { + if ss.Length > 0 { + return false + } + } + return true +} + func (ev *evalCmd) checkExpectedFailure(actual error) error { if ev.expectedFailMessage != "" { if ev.expectedFailMessage != actual.Error() { @@ -1003,13 +1142,6 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err) } res := q.Exec(t.context) - countWarnings, _ := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) - } if res.Err != nil { if cmd.fail { return cmd.checkExpectedFailure(res.Err) @@ -1020,6 +1152,13 @@ func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error { if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) } + countWarnings, _ := res.Warnings.CountWarningsAndInfo() + if !cmd.warn && countWarnings > 0 { + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", cmd.expr, cmd.line, res.Warnings) + } + if cmd.warn && countWarnings == 0 { + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", cmd.expr, cmd.line) + } defer q.Close() if err := cmd.compareResult(res.Value); err != nil { @@ -1050,13 +1189,6 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq } defer q.Close() res := q.Exec(t.context) - countWarnings, _ := res.Warnings.CountWarningsAndInfo() - if !cmd.warn && countWarnings > 0 { - return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) - } - if cmd.warn && countWarnings == 0 { - return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) - } if res.Err != nil { if cmd.fail { if err := cmd.checkExpectedFailure(res.Err); err != nil { @@ -1070,6 +1202,13 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + countWarnings, _ := res.Warnings.CountWarningsAndInfo() + if !cmd.warn && countWarnings > 0 { + return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) + } + if cmd.warn && countWarnings == 0 { + return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) @@ -1247,6 +1386,7 @@ func (ll *LazyLoader) clear() error { NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(ll.SubqueryInterval) }, EnableAtModifier: ll.opts.EnableAtModifier, EnableNegativeOffset: ll.opts.EnableNegativeOffset, + EnableDelayedNameRemoval: true, } ll.queryEngine = promql.NewEngine(opts) @@ -1307,7 +1447,11 @@ func (ll *LazyLoader) Storage() storage.Storage { // Close closes resources associated with the LazyLoader. 
func (ll *LazyLoader) Close() error { ll.cancelCtx() - return ll.storage.Close() + err := ll.queryEngine.Close() + if sErr := ll.storage.Close(); sErr != nil { + return errors.Join(sErr, err) + } + return err } func makeInt64Pointer(val int64) *int64 { diff --git a/promql/promqltest/test_test.go b/promql/promqltest/test_test.go index faffb1dd1..5aff71fb1 100644 --- a/promql/promqltest/test_test.go +++ b/promql/promqltest/test_test.go @@ -554,11 +554,48 @@ eval range from 0 to 5m step 5m testmetric `, expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`, }, + "instant query with expected scalar result": { + input: ` + eval instant at 1m 3 + 3 + `, + }, + "instant query with unexpected scalar result": { + input: ` + eval instant at 1m 3 + 2 + `, + expectedError: `error in eval 3 (line 2): expected scalar 2 but got 3`, + }, + "instant query that returns a scalar but expects a vector": { + input: ` + eval instant at 1m 3 + {} 3 + `, + expectedError: `error in eval 3 (line 2): expected vector or matrix result, but got scalar: 3 @[60000]`, + }, + "instant query that returns a vector but expects a scalar": { + input: ` + eval instant at 1m vector(3) + 3 + `, + expectedError: `error in eval vector(3) (line 2): expected scalar result, but got vector {} => 3 @[60000]`, + }, + "range query that returns a matrix but expects a scalar": { + input: ` + eval range from 0 to 1m step 30s vector(3) + 3 + `, + expectedError: `error in eval vector(3) (line 2): expected scalar result, but got matrix {} => +3 @[0] +3 @[30000] +3 @[60000]`, + }, } for name, testCase := range testCases { t.Run(name, func(t *testing.T) { - err := runTest(t, testCase.input, NewTestEngine(false, 0, DefaultMaxSamplesPerQuery)) + err := runTest(t, testCase.input, NewTestEngine(t, false, 0, DefaultMaxSamplesPerQuery)) if testCase.expectedError == "" { require.NoError(t, err) diff --git a/promql/promqltest/testdata/aggregators.test b/promql/promqltest/testdata/aggregators.test index cbb255a12..68d2e735b 100644 --- a/promql/promqltest/testdata/aggregators.test +++ b/promql/promqltest/testdata/aggregators.test @@ -503,7 +503,7 @@ eval instant at 1m avg(data{test="-big"}) eval instant at 1m avg(data{test="bigzero"}) {} 0 -# Test summing extreme values. +# Test summing and averaging extreme values. clear load 10s @@ -529,21 +529,39 @@ load 10s eval instant at 1m sum(data{test="ten"}) {} 10 +eval instant at 1m avg(data{test="ten"}) + {} 2.5 + eval instant at 1m sum by (group) (data{test="pos_inf"}) {group="1"} Inf {group="2"} Inf +eval instant at 1m avg by (group) (data{test="pos_inf"}) + {group="1"} Inf + {group="2"} Inf + eval instant at 1m sum by (group) (data{test="neg_inf"}) {group="1"} -Inf {group="2"} -Inf +eval instant at 1m avg by (group) (data{test="neg_inf"}) + {group="1"} -Inf + {group="2"} -Inf + eval instant at 1m sum(data{test="inf_inf"}) {} NaN +eval instant at 1m avg(data{test="inf_inf"}) + {} NaN + eval instant at 1m sum by (group) (data{test="nan"}) {group="1"} NaN {group="2"} NaN +eval instant at 1m avg by (group) (data{test="nan"}) + {group="1"} NaN + {group="2"} NaN + clear # Test that aggregations are deterministic. 
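The promqltest changes above thread a testing.TB through engine construction: NewTestEngine now forwards to NewTestEngineWithOpts, which registers a cleanup hook that closes the engine when the test finishes and asserts that Close() returned no error. A minimal sketch of the calling pattern, using only the helpers shown in this patch (the test name and body are illustrative, not part of the change):

```go
package example_test

import (
	"testing"

	"github.com/prometheus/prometheus/promql/promqltest"
)

func TestEngineLifecycle(t *testing.T) {
	// The testing.TB argument ties engine shutdown to this test:
	// NewTestEngineWithOpts registers a t.Cleanup hook that calls
	// engine.Close() and requires it to succeed, so tests no longer
	// leak engine resources between cases.
	engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)

	// Use the engine as before; no explicit Close() call is needed.
	promqltest.RunBuiltinTests(t, engine)
}
```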
diff --git a/promql/promqltest/testdata/functions.test b/promql/promqltest/testdata/functions.test index b8b36d91e..6e2b3630b 100644 --- a/promql/promqltest/testdata/functions.test +++ b/promql/promqltest/testdata/functions.test @@ -523,16 +523,16 @@ load 5m node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 0+10x10 eval_ordered instant at 50m sort_by_label(http_requests, "instance") - http_requests{group="production", instance="0", job="api-server"} 100 http_requests{group="canary", instance="0", job="api-server"} 300 - http_requests{group="production", instance="0", job="app-server"} 500 http_requests{group="canary", instance="0", job="app-server"} 700 - http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="production", instance="0", job="app-server"} 500 http_requests{group="canary", instance="1", job="api-server"} 400 - http_requests{group="production", instance="1", job="app-server"} 600 http_requests{group="canary", instance="1", job="app-server"} 800 - http_requests{group="production", instance="2", job="api-server"} 100 + http_requests{group="production", instance="1", job="api-server"} 200 + http_requests{group="production", instance="1", job="app-server"} 600 http_requests{group="canary", instance="2", job="api-server"} NaN + http_requests{group="production", instance="2", job="api-server"} 100 eval_ordered instant at 50m sort_by_label(http_requests, "instance", "group") http_requests{group="canary", instance="0", job="api-server"} 300 @@ -585,14 +585,14 @@ eval_ordered instant at 50m sort_by_label(http_requests, "job", "instance", "gro eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance") http_requests{group="production", instance="2", job="api-server"} 100 http_requests{group="canary", instance="2", job="api-server"} NaN - http_requests{group="canary", instance="1", job="app-server"} 800 http_requests{group="production", instance="1", job="app-server"} 600 - http_requests{group="canary", instance="1", job="api-server"} 400 http_requests{group="production", instance="1", job="api-server"} 200 - http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="1", job="app-server"} 800 + http_requests{group="canary", instance="1", job="api-server"} 400 http_requests{group="production", instance="0", job="app-server"} 500 - http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="production", instance="0", job="api-server"} 100 + http_requests{group="canary", instance="0", job="app-server"} 700 + http_requests{group="canary", instance="0", job="api-server"} 300 eval_ordered instant at 50m sort_by_label_desc(http_requests, "instance", "group") http_requests{group="production", instance="2", job="api-server"} 100 @@ -748,7 +748,6 @@ eval instant at 1m avg_over_time(metric6c[1m]) eval instant at 1m sum_over_time(metric6c[1m])/count_over_time(metric6c[1m]) {} NaN - eval instant at 1m avg_over_time(metric7[1m]) {} NaN @@ -783,6 +782,9 @@ load 10s eval instant at 1m sum_over_time(metric[1m]) {} 2 +eval instant at 1m avg_over_time(metric[1m]) + {} 0.5 + # Tests for stddev_over_time and stdvar_over_time. 
clear load 10s diff --git a/promql/promqltest/testdata/histograms.test b/promql/promqltest/testdata/histograms.test index 349a1e79c..47cba7993 100644 --- a/promql/promqltest/testdata/histograms.test +++ b/promql/promqltest/testdata/histograms.test @@ -482,3 +482,29 @@ load_with_nhcb 5m eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*_bucket"}) eval_fail instant at 50m histogram_quantile(0.99, {__name__=~"request_duration_seconds\\d*"}) + +# Histogram with constant buckets. +load_with_nhcb 1m + const_histogram_bucket{le="0.0"} 1 1 1 1 1 + const_histogram_bucket{le="1.0"} 1 1 1 1 1 + const_histogram_bucket{le="2.0"} 1 1 1 1 1 + const_histogram_bucket{le="+Inf"} 1 1 1 1 1 + +# There is no change to the bucket count over time, thus rate is 0 in each bucket. +eval instant at 5m rate(const_histogram_bucket[5m]) + {le="0.0"} 0 + {le="1.0"} 0 + {le="2.0"} 0 + {le="+Inf"} 0 + +# Native histograms do not represent empty buckets, so here the zeros are implicit. +eval instant at 5m rate(const_histogram[5m]) + {} {{schema:-53 sum:0 count:0 custom_values:[0.0 1.0 2.0]}} + +# Zero buckets mean no observations, so there is no value that observations fall below, +# which means that any quantile is a NaN. +eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_bucket[5m]))) + {} NaN + +eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) + {} NaN diff --git a/promql/promqltest/testdata/name_label_dropping.test b/promql/promqltest/testdata/name_label_dropping.test new file mode 100644 index 000000000..1f1dac360 --- /dev/null +++ b/promql/promqltest/testdata/name_label_dropping.test @@ -0,0 +1,84 @@ +# Test for __name__ label drop. +load 5m + metric{env="1"} 0 60 120 + another_metric{env="1"} 60 120 180 + +# Does not drop __name__ for vector selector +eval instant at 15m metric{env="1"} + metric{env="1"} 120 + +# Drops __name__ for unary operators +eval instant at 15m -metric + {env="1"} -120 + +# Drops __name__ for binary operators +eval instant at 15m metric + another_metric + {env="1"} 300 + +# Does not drop __name__ for binary comparison operators +eval instant at 15m metric <= another_metric + metric{env="1"} 120 + +# Drops __name__ for binary comparison operators with "bool" modifier +eval instant at 15m metric <= bool another_metric + {env="1"} 1 + +# Drops __name__ for vector-scalar operations +eval instant at 15m metric * 2 + {env="1"} 240 + +# Drops __name__ for instant-vector functions +eval instant at 15m clamp(metric, 0, 100) + {env="1"} 100 + +# Drops __name__ for range-vector functions +eval instant at 15m rate(metric{env="1"}[10m]) + {env="1"} 0.2 + +# Does not drop __name__ for last_over_time function +eval instant at 15m last_over_time(metric{env="1"}[10m]) + metric{env="1"} 120 + +# Drops name for other _over_time functions +eval instant at 15m max_over_time(metric{env="1"}[10m]) + {env="1"} 120 + +# Allows relabeling (to-be-dropped) __name__ via label_replace +eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") + {my_name="rate_metric", env="1"} 0.2 + {my_name="rate_another_metric", env="1"} 0.2 + +# Allows preserving __name__ via label_replace +eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") + rate_metric{env="1"} 0.2 + rate_another_metric{env="1"} 0.2 + +# Allows relabeling (to-be-dropped) __name__ via label_join +eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") + 
{my_name="metric", env="1"} 0.2 + {my_name="another_metric", env="1"} 0.2 + +# Allows preserving __name__ via label_join +eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") + metric_1{env="1"} 0.2 + another_metric_1{env="1"} 0.2 + +# Does not drop metric names fro aggregation operators +eval instant at 15m sum by (__name__, env) (metric{env="1"}) + metric{env="1"} 120 + +# Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) +# This is an accidental side effect of delayed __name__ label dropping +eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m])) + +# Aggregation operators aggregate metrics with same labelset and to-be-dropped names +# This is an accidental side effect of delayed __name__ label dropping +eval instant at 15m sum(rate({env="1"}[10m])) by (env) + {env="1"} 0.4 + +# Aggregationk operators propagate __name__ label dropping information +eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"})) + metric{env="1"} 120 + +eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) + {env="1"} 0.2 diff --git a/promql/promqltest/testdata/native_histograms.test b/promql/promqltest/testdata/native_histograms.test index 6a8189a54..71e102dce 100644 --- a/promql/promqltest/testdata/native_histograms.test +++ b/promql/promqltest/testdata/native_histograms.test @@ -718,6 +718,52 @@ eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4) eval instant at 10m histogram_sum(scalar(histogram_fraction(-Inf, +Inf, sum(histogram_fraction_4))) * histogram_fraction_4) {} 100 +# Apply multiplication and division operator to histogram. +load 10m + histogram_mul_div {{schema:0 count:21 sum:33 z_bucket:3 z_bucket_w:0.001 buckets:[3 3 3] n_buckets:[3 3 3]}}x1 + float_series_3 3+0x1 + float_series_0 0+0x1 + +eval instant at 10m histogram_mul_div*3 + {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + +eval instant at 10m 3*histogram_mul_div + {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + +eval instant at 10m histogram_mul_div*float_series_3 + {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + +eval instant at 10m float_series_3*histogram_mul_div + {} {{schema:0 count:63 sum:99 z_bucket:9 z_bucket_w:0.001 buckets:[9 9 9] n_buckets:[9 9 9]}} + +eval instant at 10m histogram_mul_div/3 + {} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}} + +eval instant at 10m histogram_mul_div/float_series_3 + {} {{schema:0 count:7 sum:11 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 1] n_buckets:[1 1 1]}} + +eval instant at 10m histogram_mul_div*0 + {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} + +eval instant at 10m 0*histogram_mul_div + {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} + +eval instant at 10m histogram_mul_div*float_series_0 + {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} + +eval instant at 10m float_series_0*histogram_mul_div + {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} + +# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. 
See: https://github.com/prometheus/prometheus/issues/13934 +eval instant at 10m histogram_mul_div/0 + {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + +eval instant at 10m histogram_mul_div/float_series_0 + {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + +eval instant at 10m histogram_mul_div*0/0 + {} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}} + clear # Counter reset only noticeable in a single bucket. @@ -748,3 +794,177 @@ eval instant at 5m histogram_quantile(0.5, custom_buckets_histogram) eval instant at 5m sum(custom_buckets_histogram) {} {{schema:-53 sum:5 count:4 custom_values:[5 10] buckets:[1 2 1]}} + +clear + +# Test 'this native histogram metric is not a gauge' warning for rate +load 30s + some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} + +# Test the case where we only have two points for rate +eval_warn instant at 30s rate(some_metric[30s]) + {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} + +# Test the case where we have more than two points for rate +eval_warn instant at 1m rate(some_metric[1m]) + {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} + +clear + +# Test rate() over mixed exponential and custom buckets. +load 30s + some_metric {{schema:0 sum:1 count:1 buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# Start and end with exponential, with custom in the middle. +eval_warn instant at 1m rate(some_metric[1m]) + # Should produce no results. + +# Start and end with custom, with exponential in the middle. +eval_warn instant at 1m30s rate(some_metric[1m]) + # Should produce no results. + +# Start with custom, end with exponential. +eval_warn instant at 1m rate(some_metric[30s]) + # Should produce no results. + +# Start with exponential, end with custom. +eval_warn instant at 30s rate(some_metric[30s]) + # Should produce no results. + +clear + +# Histogram with constant buckets. +load 1m + const_histogram {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} {{schema:0 sum:1 count:1 buckets:[1 1 1]}} + +# There is no change to the bucket count over time, thus rate is 0 in each bucket. +# However native histograms do not represent empty buckets, so here the zeros are implicit. +eval instant at 5m rate(const_histogram[5m]) + {} {{schema:0 sum:0 count:0}} + +# Zero buckets mean no observations, thus the denominator in the average is 0 +# leading to 0/0, which is NaN. +eval instant at 5m histogram_avg(rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so count is 0. +eval instant at 5m histogram_count(rate(const_histogram[5m])) + {} 0.0 + +# Zero buckets mean no observations and empty histogram has a sum of 0 by definition. +eval instant at 5m histogram_sum(rate(const_histogram[5m])) + {} 0.0 + +# Zero buckets mean no observations, thus the denominator in the fraction is 0, +# leading to 0/0, which is NaN. 
+eval instant at 5m histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) + {} NaN + +# Workaround to calculate the observation count corresponding to NaN fraction. +eval instant at 5m histogram_count(rate(const_histogram[5m])) == 0.0 or histogram_fraction(0.0, 1.0, rate(const_histogram[5m])) * histogram_count(rate(const_histogram[5m])) + {} 0.0 + +# Zero buckets mean no observations, so there is no value that observations fall below, +# which means that any quantile is a NaN. +eval instant at 5m histogram_quantile(1.0, rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so there is no standard deviation. +eval instant at 5m histogram_stddev(rate(const_histogram[5m])) + {} NaN + +# Zero buckets mean no observations, so there is no standard variance. +eval instant at 5m histogram_stdvar(rate(const_histogram[5m])) + {} NaN + +clear + +# Test mixing exponential and custom buckets. +load 6m + metric{series="exponential"} {{sum:4 count:3 buckets:[1 2 1]}} _ {{sum:4 count:3 buckets:[1 2 1]}} + metric{series="other-exponential"} {{sum:3 count:2 buckets:[1 1 1]}} _ {{sum:3 count:2 buckets:[1 1 1]}} + metric{series="custom"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="other-custom"} _ {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} {{schema:-53 sum:15 count:2 custom_values:[5 10] buckets:[0 2]}} + +# T=0: only exponential +# T=6: only custom +# T=12: mixed, should be ignored and emit a warning +eval_warn range from 0 to 12m step 6m sum(metric) + {} {{sum:7 count:5 buckets:[2 3 2]}} {{schema:-53 sum:16 count:3 custom_values:[5 10] buckets:[1 2]}} _ + +eval_warn range from 0 to 12m step 6m avg(metric) + {} {{sum:3.5 count:2.5 buckets:[1 1.5 1]}} {{schema:-53 sum:8 count:1.5 custom_values:[5 10] buckets:[0.5 1]}} _ + +clear + +# Test incompatible custom bucket schemas. 
+load 6m + metric{series="1"} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} _ {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{series="3"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + +# T=0: incompatible, should be ignored and emit a warning +# T=6: compatible +# T=12: incompatible followed by compatible, should be ignored and emit a warning +eval_warn range from 0 to 12m step 6m sum(metric) + {} _ {{schema:-53 sum:2 count:2 custom_values:[5 10] buckets:[2]}} _ + +eval_warn range from 0 to 12m step 6m avg(metric) + {} _ {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} _ + +clear + +load 1m + metric{group="just-floats", series="1"} 2 + metric{group="just-floats", series="2"} 3 + metric{group="just-exponential-histograms", series="1"} {{sum:3 count:4 buckets:[1 2 1]}} + metric{group="just-exponential-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}} + metric{group="just-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + metric{group="just-custom-histograms", series="2"} {{schema:-53 sum:3 count:4 custom_values:[2] buckets:[7]}} + metric{group="floats-and-histograms", series="1"} 2 + metric{group="floats-and-histograms", series="2"} {{sum:2 count:3 buckets:[1 1 1]}} + metric{group="exponential-and-custom-histograms", series="1"} {{sum:2 count:3 buckets:[1 1 1]}} + metric{group="exponential-and-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{group="incompatible-custom-histograms", series="1"} {{schema:-53 sum:1 count:1 custom_values:[5 10] buckets:[1]}} + metric{group="incompatible-custom-histograms", series="2"} {{schema:-53 sum:1 count:1 custom_values:[2] buckets:[1]}} + +eval_warn instant at 0 sum by (group) (metric) + {group="just-floats"} 5 + {group="just-exponential-histograms"} {{sum:5 count:7 buckets:[2 3 2]}} + {group="just-custom-histograms"} {{schema:-53 sum:4 count:5 custom_values:[2] buckets:[8]}} + +clear + +# Test native histograms with sum, count, avg. 
+load 10m + histogram_sum{idx="0"} {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}}x1 + histogram_sum{idx="1"} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sum{idx="2"} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}}x1 + histogram_sum{idx="3"} {{schema:1 count:0}}x1 + histogram_sum_float{idx="0"} 42.0x1 + +eval instant at 10m sum(histogram_sum) + {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} + +eval_warn instant at 10m sum({idx="0"}) + +eval instant at 10m sum(histogram_sum{idx="0"} + ignoring(idx) histogram_sum{idx="1"} + ignoring(idx) histogram_sum{idx="2"} + ignoring(idx) histogram_sum{idx="3"}) + {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} + +eval instant at 10m count(histogram_sum) + {} 4 + +eval instant at 10m avg(histogram_sum) + {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} + +clear + +# Test native histograms with sum_over_time, avg_over_time. +load 1m + histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}} + +eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m]) + {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} + +eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m]) + {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} diff --git a/promql/value.go b/promql/value.go index f129137d8..f25dbcd78 100644 --- a/promql/value.go +++ b/promql/value.go @@ -68,6 +68,9 @@ type Series struct { Metric labels.Labels `json:"metric"` Floats []FPoint `json:"values,omitempty"` Histograms []HPoint `json:"histograms,omitempty"` + // DropName is used to indicate whether the __name__ label should be dropped + // as part of the query evaluation. + DropName bool `json:"-"` } func (s Series) String() string { @@ -194,6 +197,9 @@ type Sample struct { H *histogram.FloatHistogram Metric labels.Labels + // DropName is used to indicate whether the __name__ label should be dropped + // as part of the query evaluation. 
+ DropName bool } func (s Sample) String() string { diff --git a/rules/alerting_test.go b/rules/alerting_test.go index 5ebd049f6..67d683c85 100644 --- a/rules/alerting_test.go +++ b/rules/alerting_test.go @@ -36,16 +36,19 @@ import ( "github.com/prometheus/prometheus/util/testutil" ) -var testEngine = promql.NewEngine(promql.EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10000, - Timeout: 100 * time.Second, - NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 }, - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: true, -}) +func testEngine(tb testing.TB) *promql.Engine { + tb.Helper() + return promqltest.NewTestEngineWithOpts(tb, promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 }, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: true, + }) +} func TestAlertingRuleState(t *testing.T) { tests := []struct { @@ -225,12 +228,14 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { }, } + ng := testEngine(t) + baseTime := time.Unix(0, 0) for i, result := range results { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) result[0].T = timestamp.FromTime(evalTime) - res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. @@ -247,7 +252,7 @@ func TestAlertingRuleLabelsUpdate(t *testing.T) { testutil.RequireEqual(t, result, filteredRes) } evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) - res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) require.Empty(t, res) } @@ -309,13 +314,15 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { }, } + ng := testEngine(t) + evalTime := time.Unix(0, 0) result[0].T = timestamp.FromTime(evalTime) result[1].T = timestamp.FromTime(evalTime) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := ruleWithoutExternalLabels.Eval( - context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -329,7 +336,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) { } res, err = ruleWithExternalLabels.Eval( - context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -406,9 +413,11 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { result[0].T = timestamp.FromTime(evalTime) result[1].T = timestamp.FromTime(evalTime) + ng := testEngine(t) + var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
res, err := ruleWithoutExternalURL.Eval( - context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -422,7 +431,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) { } res, err = ruleWithExternalURL.Eval( - context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -475,9 +484,11 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) { evalTime := time.Unix(0, 0) result[0].T = timestamp.FromTime(evalTime) + ng := testEngine(t) + var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. res, err := rule.Eval( - context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0, + context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0, ) require.NoError(t, err) for _, smpl := range res { @@ -520,6 +531,8 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }}; ) evalTime := time.Unix(0, 0) + ng := testEngine(t) + startQueryCh := make(chan struct{}) getDoneCh := make(chan struct{}) slowQueryFunc := func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { @@ -533,7 +546,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }}; require.Fail(t, "unexpected blocking when template expanding.") } } - return EngineQueryFunc(testEngine, storage)(ctx, q, ts) + return EngineQueryFunc(ng, storage)(ctx, q, ts) } go func() { <-startQueryCh @@ -578,7 +591,7 @@ func TestAlertingRuleDuplicate(t *testing.T) { Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -642,13 +655,13 @@ func TestAlertingRuleLimit(t *testing.T) { ) evalTime := time.Unix(0, 0) - + ng := testEngine(t) for _, test := range tests { - switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { + switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, test.limit); { case err != nil: require.EqualError(t, err, test.err) case test.err != "": - t.Errorf("Expected errror %s, got none", test.err) + t.Errorf("Expected error %s, got none", test.err) } } } @@ -866,12 +879,13 @@ func TestKeepFiringFor(t *testing.T) { }, } + ng := testEngine(t) baseTime := time.Unix(0, 0) for i, result := range results { t.Logf("case %d", i) evalTime := baseTime.Add(time.Duration(i) * time.Minute) result[0].T = timestamp.FromTime(evalTime) - res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. 
@@ -888,7 +902,7 @@ func TestKeepFiringFor(t *testing.T) { testutil.RequireEqual(t, result, filteredRes) } evalTime := baseTime.Add(time.Duration(len(results)) * time.Minute) - res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) require.Empty(t, res) } @@ -923,9 +937,10 @@ func TestPendingAndKeepFiringFor(t *testing.T) { F: 1, } + ng := testEngine(t) baseTime := time.Unix(0, 0) result.T = timestamp.FromTime(baseTime) - res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, baseTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) require.Len(t, res, 2) @@ -940,7 +955,7 @@ func TestPendingAndKeepFiringFor(t *testing.T) { } evalTime := baseTime.Add(time.Minute) - res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err = rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) require.Empty(t, res) } diff --git a/rules/manager_test.go b/rules/manager_test.go index 9865cbdfe..b9f6db327 100644 --- a/rules/manager_test.go +++ b/rules/manager_test.go @@ -158,12 +158,13 @@ func TestAlertingRule(t *testing.T) { }, } + ng := testEngine(t) for i, test := range tests { t.Logf("case %d", i) evalTime := baseTime.Add(test.time) - res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples. @@ -299,6 +300,7 @@ func TestForStateAddSamples(t *testing.T) { }, } + ng := testEngine(t) var forState float64 for i, test := range tests { t.Logf("case %d", i) @@ -311,7 +313,7 @@ func TestForStateAddSamples(t *testing.T) { forState = float64(value.StaleNaN) } - res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(testEngine, storage), nil, 0) + res, err := rule.Eval(context.TODO(), queryOffset, evalTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) var filteredRes promql.Vector // After removing 'ALERTS' samples. 
@@ -366,8 +368,9 @@ func TestForStateRestore(t *testing.T) { expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`) require.NoError(t, err) + ng := testEngine(t) opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(testEngine, storage), + QueryFunc: EngineQueryFunc(ng, storage), Appendable: storage, Queryable: storage, Context: context.Background(), @@ -538,7 +541,7 @@ func TestStaleness(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(engineOpts) + engine := promqltest.NewTestEngineWithOpts(t, engineOpts) opts := &ManagerOptions{ QueryFunc: EngineQueryFunc(engine, st), Appendable: st, @@ -772,7 +775,7 @@ func TestUpdate(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ruleManager := NewManager(&ManagerOptions{ Appendable: st, Queryable: st, @@ -910,7 +913,7 @@ func TestNotify(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(engineOpts) + engine := promqltest.NewTestEngineWithOpts(t, engineOpts) var lastNotified []*Alert notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) { lastNotified = alerts @@ -985,7 +988,7 @@ func TestMetricsUpdate(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ruleManager := NewManager(&ManagerOptions{ Appendable: storage, Queryable: storage, @@ -1059,7 +1062,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ruleManager := NewManager(&ManagerOptions{ Appendable: storage, Queryable: storage, @@ -1136,7 +1139,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ruleManager := NewManager(&ManagerOptions{ Appendable: storage, Queryable: storage, @@ -1238,7 +1241,7 @@ func TestRuleHealthUpdates(t *testing.T) { MaxSamples: 10, Timeout: 10 * time.Second, } - engine := promql.NewEngine(engineOpts) + engine := promqltest.NewTestEngineWithOpts(t, engineOpts) opts := &ManagerOptions{ QueryFunc: EngineQueryFunc(engine, st), Appendable: st, @@ -1335,9 +1338,10 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) { }, } + ng := testEngine(t) testFunc := func(tst testInput) { opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(testEngine, storage), + QueryFunc: EngineQueryFunc(ng, storage), Appendable: storage, Queryable: storage, Context: context.Background(), @@ -1421,8 +1425,9 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) { } require.NoError(t, app.Commit()) + ng := testEngine(t) opts := &ManagerOptions{ - QueryFunc: EngineQueryFunc(testEngine, storage), + QueryFunc: EngineQueryFunc(ng, storage), Appendable: storage, Queryable: storage, Context: context.Background(), diff --git a/rules/recording_test.go b/rules/recording_test.go index fdddd4e02..72c0764f9 100644 --- a/rules/recording_test.go +++ b/rules/recording_test.go @@ -123,10 +123,11 @@ func TestRuleEval(t *testing.T) { storage := setUpRuleEvalTest(t) t.Cleanup(func() { storage.Close() }) + ng := testEngine(t) for _, scenario := range ruleEvalTestScenarios { t.Run(scenario.name, func(t *testing.T) { rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) - result, err := 
rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) + result, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0) require.NoError(t, err) testutil.RequireEqual(t, scenario.expected, result) }) @@ -137,6 +138,7 @@ func BenchmarkRuleEval(b *testing.B) { storage := setUpRuleEvalTest(b) b.Cleanup(func() { storage.Close() }) + ng := testEngine(b) for _, scenario := range ruleEvalTestScenarios { b.Run(scenario.name, func(b *testing.B) { rule := NewRecordingRule("test_rule", scenario.expr, scenario.ruleLabels) @@ -144,7 +146,7 @@ func BenchmarkRuleEval(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(testEngine, storage), nil, 0) + _, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0) if err != nil { require.NoError(b, err) } @@ -165,7 +167,7 @@ func TestRuleEvalDuplicate(t *testing.T) { Timeout: 10 * time.Second, } - engine := promql.NewEngine(opts) + engine := promqltest.NewTestEngineWithOpts(t, opts) ctx, cancelCtx := context.WithCancel(context.Background()) defer cancelCtx() @@ -212,10 +214,11 @@ func TestRecordingRuleLimit(t *testing.T) { labels.FromStrings("test", "test"), ) + ng := testEngine(t) evalTime := time.Unix(0, 0) for _, test := range tests { - switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(testEngine, storage), nil, test.limit); { + switch _, err := rule.Eval(context.TODO(), 0, evalTime, EngineQueryFunc(ng, storage), nil, test.limit); { case err != nil: require.EqualError(t, err, test.err) case test.err != "": diff --git a/scrape/clientprotobuf.go b/scrape/clientprotobuf.go index 2213268d5..e632035b4 100644 --- a/scrape/clientprotobuf.go +++ b/scrape/clientprotobuf.go @@ -23,7 +23,7 @@ import ( dto "github.com/prometheus/client_model/go" ) -// Write a MetricFamily into a protobuf. +// MetricFamilyToProtobuf writes a MetricFamily into a protobuf. // This function is intended for testing scraping by providing protobuf serialized input. func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { buffer := &bytes.Buffer{} @@ -34,7 +34,7 @@ func MetricFamilyToProtobuf(metricFamily *dto.MetricFamily) ([]byte, error) { return buffer.Bytes(), nil } -// Append a MetricFamily protobuf representation to a buffer. +// AddMetricFamilyToProtobuf appends a MetricFamily protobuf representation to a buffer. // This function is intended for testing scraping by providing protobuf serialized input. func AddMetricFamilyToProtobuf(buffer *bytes.Buffer, metricFamily *dto.MetricFamily) error { protoBuf, err := proto.Marshal(metricFamily) diff --git a/scrape/manager.go b/scrape/manager.go index 156e949f8..e3dba5f0e 100644 --- a/scrape/manager.go +++ b/scrape/manager.go @@ -93,6 +93,8 @@ type Options struct { skipOffsetting bool } +const DefaultNameEscapingScheme = model.ValueEncodingEscaping + // Manager maintains a set of scrape pools and manages start/stop cycles // when receiving new target groups from the discovery manager. 
type Manager struct { @@ -140,7 +142,7 @@ func (m *Manager) UnregisterMetrics() { func (m *Manager) reloader() { reloadIntervalDuration := m.opts.DiscoveryReloadInterval - if reloadIntervalDuration < model.Duration(5*time.Second) { + if reloadIntervalDuration == model.Duration(0) { reloadIntervalDuration = model.Duration(5 * time.Second) } diff --git a/scrape/manager_test.go b/scrape/manager_test.go index c8d9bd698..c71691c95 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -20,6 +20,7 @@ import ( "net/http/httptest" "net/url" "os" + "sort" "strconv" "sync" "testing" @@ -36,6 +37,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery" + _ "github.com/prometheus/prometheus/discovery/file" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" @@ -722,8 +724,6 @@ func TestManagerCTZeroIngestion(t *testing.T) { name string counterSample *dto.Counter enableCTZeroIngestion bool - - expectedValues []float64 }{ { name: "disabled with CT on counter", @@ -732,7 +732,6 @@ func TestManagerCTZeroIngestion(t *testing.T) { // Timestamp does not matter as long as it exists in this test. CreatedTimestamp: timestamppb.Now(), }, - expectedValues: []float64{1.0}, }, { name: "enabled with CT on counter", @@ -742,7 +741,6 @@ func TestManagerCTZeroIngestion(t *testing.T) { CreatedTimestamp: timestamppb.Now(), }, enableCTZeroIngestion: true, - expectedValues: []float64{0.0, 1.0}, }, { name: "enabled without CT on counter", @@ -750,7 +748,6 @@ func TestManagerCTZeroIngestion(t *testing.T) { Value: proto.Float64(1.0), }, enableCTZeroIngestion: true, - expectedValues: []float64{1.0}, }, } { t.Run(tc.name, func(t *testing.T) { @@ -817,46 +814,44 @@ func TestManagerCTZeroIngestion(t *testing.T) { }) scrapeManager.reload() + var got []float64 // Wait for one scrape. ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error { - if countFloatSamples(app, mName) != len(tc.expectedValues) { - return fmt.Errorf("expected %v samples", tc.expectedValues) + app.mtx.Lock() + defer app.mtx.Unlock() + + // Check if scrape happened and grab the relevant samples, they have to be there - or it's a bug + // and it's not worth waiting. + for _, f := range app.resultFloats { + if f.metric.Get(model.MetricNameLabel) == mName { + got = append(got, f.f) + } } - return nil + if len(app.resultFloats) > 0 { + return nil + } + return fmt.Errorf("expected some samples, got none") }), "after 1 minute") scrapeManager.Stop() - require.Equal(t, tc.expectedValues, getResultFloats(app, mName)) + // Check for zero samples, assuming we only injected always one sample. + // Did it contain CT to inject? If yes, was CT zero enabled? + if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { + require.Len(t, got, 2) + require.Equal(t, 0.0, got[0]) + require.Equal(t, tc.counterSample.GetValue(), got[1]) + return + } + + // Expect only one, valid sample. 
+ require.Len(t, got, 1) + require.Equal(t, tc.counterSample.GetValue(), got[0]) }) } } -func countFloatSamples(a *collectResultAppender, expectedMetricName string) (count int) { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, f := range a.resultFloats { - if f.metric.Get(model.MetricNameLabel) == expectedMetricName { - count++ - } - } - return count -} - -func getResultFloats(app *collectResultAppender, expectedMetricName string) (result []float64) { - app.mtx.Lock() - defer app.mtx.Unlock() - - for _, f := range app.resultFloats { - if f.metric.Get(model.MetricNameLabel) == expectedMetricName { - result = append(result, f.f) - } - } - return result -} - func TestUnregisterMetrics(t *testing.T) { reg := prometheus.NewRegistry() // Check that all metrics can be unregistered, allowing a second manager to be created. @@ -869,3 +864,414 @@ func TestUnregisterMetrics(t *testing.T) { manager.UnregisterMetrics() } } + +func applyConfig( + t *testing.T, + config string, + scrapeManager *Manager, + discoveryManager *discovery.Manager, +) { + t.Helper() + + cfg := loadConfiguration(t, config) + require.NoError(t, scrapeManager.ApplyConfig(cfg)) + + c := make(map[string]discovery.Configs) + scfgs, err := cfg.GetScrapeConfigs() + require.NoError(t, err) + for _, v := range scfgs { + c[v.JobName] = v.ServiceDiscoveryConfigs + } + require.NoError(t, discoveryManager.ApplyConfig(c)) +} + +func runManagers(t *testing.T, ctx context.Context) (*discovery.Manager, *Manager) { + t.Helper() + + reg := prometheus.NewRegistry() + sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg)) + require.NoError(t, err) + discoveryManager := discovery.NewManager( + ctx, + log.NewNopLogger(), + reg, + sdMetrics, + discovery.Updatert(100*time.Millisecond), + ) + scrapeManager, err := NewManager( + &Options{DiscoveryReloadInterval: model.Duration(100 * time.Millisecond)}, + nil, + nopAppendable{}, + prometheus.NewRegistry(), + ) + require.NoError(t, err) + go discoveryManager.Run() + go scrapeManager.Run(discoveryManager.SyncCh()) + return discoveryManager, scrapeManager +} + +func writeIntoFile(t *testing.T, content, filePattern string) *os.File { + t.Helper() + + file, err := os.CreateTemp("", filePattern) + require.NoError(t, err) + _, err = file.WriteString(content) + require.NoError(t, err) + return file +} + +func requireTargets( + t *testing.T, + scrapeManager *Manager, + jobName string, + waitToAppear bool, + expectedTargets []string, +) { + t.Helper() + + require.Eventually(t, func() bool { + targets, ok := scrapeManager.TargetsActive()[jobName] + if !ok { + if waitToAppear { + return false + } + t.Fatalf("job %s shouldn't be dropped", jobName) + } + if expectedTargets == nil { + return targets == nil + } + if len(targets) != len(expectedTargets) { + return false + } + sTargets := []string{} + for _, t := range targets { + sTargets = append(sTargets, t.String()) + } + sort.Strings(expectedTargets) + sort.Strings(sTargets) + for i, t := range sTargets { + if t != expectedTargets[i] { + return false + } + } + return true + }, 1*time.Second, 100*time.Millisecond) +} + +// TestTargetDisappearsAfterProviderRemoved makes sure that when a provider is dropped, (only) its targets are dropped. 
+func TestTargetDisappearsAfterProviderRemoved(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + myJob := "my-job" + myJobSDTargetURL := "my:9876" + myJobStaticTargetURL := "my:5432" + + sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, myJobSDTargetURL) + sDFile := writeIntoFile(t, sdFileContent, "*targets.json") + + baseConfig := ` +scrape_configs: +- job_name: %s + static_configs: + - targets: ['%s'] + file_sd_configs: + - files: ['%s'] +` + + discoveryManager, scrapeManager := runManagers(t, ctx) + defer scrapeManager.Stop() + + applyConfig( + t, + fmt.Sprintf( + baseConfig, + myJob, + myJobStaticTargetURL, + sDFile.Name(), + ), + scrapeManager, + discoveryManager, + ) + // Make sure the job's targets are taken into account + requireTargets( + t, + scrapeManager, + myJob, + true, + []string{ + fmt.Sprintf("http://%s/metrics", myJobSDTargetURL), + fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL), + }, + ) + + // Apply a new config where a provider is removed + baseConfig = ` +scrape_configs: +- job_name: %s + static_configs: + - targets: ['%s'] +` + applyConfig( + t, + fmt.Sprintf( + baseConfig, + myJob, + myJobStaticTargetURL, + ), + scrapeManager, + discoveryManager, + ) + // Make sure the corresponding target was dropped + requireTargets( + t, + scrapeManager, + myJob, + false, + []string{ + fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL), + }, + ) + + // Apply a new config with no providers + baseConfig = ` +scrape_configs: +- job_name: %s +` + applyConfig( + t, + fmt.Sprintf( + baseConfig, + myJob, + ), + scrapeManager, + discoveryManager, + ) + // Make sure the corresponding target was dropped + requireTargets( + t, + scrapeManager, + myJob, + false, + nil, + ) +} + +// TestOnlyProviderStaleTargetsAreDropped makes sure that when a job has only one provider with multiple targets +// and when the provider can no longer discover some of those targets, only those stale targets are dropped.
+func TestOnlyProviderStaleTargetsAreDropped(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + jobName := "my-job" + jobTarget1URL := "foo:9876" + jobTarget2URL := "foo:5432" + + sdFile1Content := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTarget1URL) + sdFile2Content := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTarget2URL) + sDFile1 := writeIntoFile(t, sdFile1Content, "*targets.json") + sDFile2 := writeIntoFile(t, sdFile2Content, "*targets.json") + + baseConfig := ` +scrape_configs: +- job_name: %s + file_sd_configs: + - files: ['%s', '%s'] +` + discoveryManager, scrapeManager := runManagers(t, ctx) + defer scrapeManager.Stop() + + applyConfig( + t, + fmt.Sprintf(baseConfig, jobName, sDFile1.Name(), sDFile2.Name()), + scrapeManager, + discoveryManager, + ) + + // Make sure the job's targets are taken into account + requireTargets( + t, + scrapeManager, + jobName, + true, + []string{ + fmt.Sprintf("http://%s/metrics", jobTarget1URL), + fmt.Sprintf("http://%s/metrics", jobTarget2URL), + }, + ) + + // Apply the same config for the same job but with a non-existing file to make the provider + // unable to discover some targets + applyConfig( + t, + fmt.Sprintf(baseConfig, jobName, sDFile1.Name(), "/idontexistdoi.json"), + scrapeManager, + discoveryManager, + ) + + // The old target should get dropped + requireTargets( + t, + scrapeManager, + jobName, + false, + []string{fmt.Sprintf("http://%s/metrics", jobTarget1URL)}, + ) +} + +// TestProviderStaleTargetsAreDropped makes sure that when a job has only one provider and when that provider +// should no longer discover targets, the targets of that provider are dropped. +// See: https://github.com/prometheus/prometheus/issues/12858 +func TestProviderStaleTargetsAreDropped(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + jobName := "my-job" + jobTargetURL := "foo:9876" + + sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, jobTargetURL) + sDFile := writeIntoFile(t, sdFileContent, "*targets.json") + + baseConfig := ` +scrape_configs: +- job_name: %s + file_sd_configs: + - files: ['%s'] +` + discoveryManager, scrapeManager := runManagers(t, ctx) + defer scrapeManager.Stop() + + applyConfig( + t, + fmt.Sprintf(baseConfig, jobName, sDFile.Name()), + scrapeManager, + discoveryManager, + ) + + // Make sure the job's targets are taken into account + requireTargets( + t, + scrapeManager, + jobName, + true, + []string{ + fmt.Sprintf("http://%s/metrics", jobTargetURL), + }, + ) + + // Apply the same config for the same job but with a non-existing file to make the provider + // unable to discover some targets + applyConfig( + t, + fmt.Sprintf(baseConfig, jobName, "/idontexistdoi.json"), + scrapeManager, + discoveryManager, + ) + + // The old target should get dropped + requireTargets( + t, + scrapeManager, + jobName, + false, + nil, + ) +} + +// TestOnlyStaleTargetsAreDropped makes sure that when a job has multiple providers and one of them should no +// longer discover targets, only the stale targets of that provider are dropped.
+func TestOnlyStaleTargetsAreDropped(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + myJob := "my-job" + myJobSDTargetURL := "my:9876" + myJobStaticTargetURL := "my:5432" + otherJob := "other-job" + otherJobTargetURL := "other:1234" + + sdFileContent := fmt.Sprintf(`[{"targets": ["%s"]}]`, myJobSDTargetURL) + sDFile := writeIntoFile(t, sdFileContent, "*targets.json") + + baseConfig := ` +scrape_configs: +- job_name: %s + static_configs: + - targets: ['%s'] + file_sd_configs: + - files: ['%s'] +- job_name: %s + static_configs: + - targets: ['%s'] +` + + discoveryManager, scrapeManager := runManagers(t, ctx) + defer scrapeManager.Stop() + + // Apply the initial config with an existing file + applyConfig( + t, + fmt.Sprintf( + baseConfig, + myJob, + myJobStaticTargetURL, + sDFile.Name(), + otherJob, + otherJobTargetURL, + ), + scrapeManager, + discoveryManager, + ) + // Make sure the job's targets are taken into account + requireTargets( + t, + scrapeManager, + myJob, + true, + []string{ + fmt.Sprintf("http://%s/metrics", myJobSDTargetURL), + fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL), + }, + ) + requireTargets( + t, + scrapeManager, + otherJob, + true, + []string{fmt.Sprintf("http://%s/metrics", otherJobTargetURL)}, + ) + + // Apply the same config with a non-existing file for myJob + applyConfig( + t, + fmt.Sprintf( + baseConfig, + myJob, + myJobStaticTargetURL, + "/idontexistdoi.json", + otherJob, + otherJobTargetURL, + ), + scrapeManager, + discoveryManager, + ) + + // Only the SD target should get dropped for myJob + requireTargets( + t, + scrapeManager, + myJob, + false, + []string{ + fmt.Sprintf("http://%s/metrics", myJobStaticTargetURL), + }, + ) + // The otherJob should keep its target + requireTargets( + t, + scrapeManager, + otherJob, + false, + []string{fmt.Sprintf("http://%s/metrics", otherJobTargetURL)}, + ) +} diff --git a/scrape/scrape.go b/scrape/scrape.go index 68411a62e..2abd4691d 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -111,6 +111,7 @@ type scrapeLoopOptions struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool + validationScheme model.ValidationScheme mrc []*relabel.Config cache *scrapeCache @@ -186,6 +187,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed options.PassMetadataInContext, metrics, options.skipOffsetting, + opts.validationScheme, ) } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -303,6 +305,11 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { mrc = sp.config.MetricRelabelConfigs ) + validationScheme := model.LegacyValidation + if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { + validationScheme = model.UTF8Validation + } + sp.targetMtx.Lock() forcedErr := sp.refreshTargetLimitErr() @@ -323,7 +330,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), } newLoop = sp.newLoop(scrapeLoopOptions{ @@ -341,6 +348,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { cache: cache, interval: interval, timeout: timeout, + validationScheme: validationScheme, }) ) if err != nil { @@ -452,6 +460,11 @@ func (sp *scrapePool) sync(targets []*Target) { scrapeClassicHistograms = 
sp.config.ScrapeClassicHistograms ) + validationScheme := model.LegacyValidation + if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { + validationScheme = model.UTF8Validation + } + sp.targetMtx.Lock() for _, t := range targets { hash := t.hash() @@ -467,7 +480,7 @@ func (sp *scrapePool) sync(targets []*Target) { client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(sp.config.ScrapeProtocols), + acceptHeader: acceptHeader(sp.config.ScrapeProtocols, validationScheme), acceptEncodingHeader: acceptEncodingHeader(enableCompression), metrics: sp.metrics, } @@ -714,11 +727,16 @@ var errBodySizeLimit = errors.New("body size limit exceeded") // acceptHeader transforms preference from the options into specific header values as // https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines. // No validation is here, we expect scrape protocols to be validated already. -func acceptHeader(sps []config.ScrapeProtocol) string { +func acceptHeader(sps []config.ScrapeProtocol, scheme model.ValidationScheme) string { var vals []string weight := len(config.ScrapeProtocolsHeaders) + 1 for _, sp := range sps { - vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight)) + val := config.ScrapeProtocolsHeaders[sp] + if scheme == model.UTF8Validation { + val += ";" + config.UTF8NamesHeader + } + val += fmt.Sprintf(";q=0.%d", weight) + vals = append(vals, val) weight-- } // Default match anything. @@ -838,6 +856,7 @@ type scrapeLoop struct { interval time.Duration timeout time.Duration scrapeClassicHistograms bool + validationScheme model.ValidationScheme // Feature flagged options. enableNativeHistogramIngestion bool @@ -1145,6 +1164,7 @@ func newScrapeLoop(ctx context.Context, passMetadataInContext bool, metrics *scrapeMetrics, skipOffsetting bool, + validationScheme model.ValidationScheme, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1196,6 +1216,7 @@ func newScrapeLoop(ctx context.Context, appendMetadataToWAL: appendMetadataToWAL, metrics: metrics, skipOffsetting: skipOffsetting, + validationScheme: validationScheme, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1616,7 +1637,7 @@ loop: err = errNameLabelMandatory break loop } - if !lset.IsValid() { + if !lset.IsValid(sl.validationScheme) { err = fmt.Errorf("invalid metric name or label names: %s", lset.String()) break loop } @@ -1631,15 +1652,17 @@ loop: updateMetadata(lset, true) } - if seriesAlreadyScraped { + if seriesAlreadyScraped && parsedTimestamp == nil { err = storage.ErrDuplicateSampleForTimestamp } else { - if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) - if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. - // CT is an experimental feature. For now, we don't need to fail the - // scrape on errors updating the created timestamp, log debug. - level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + if sl.enableCTZeroIngestion { + if ctMs := p.CreatedTimestamp(); ctMs != nil { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. + // CT is an experimental feature. For now, we don't need to fail the + // scrape on errors updating the created timestamp, log debug. 
+ level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + } } } diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index a3fe6ac1a..b703f21d4 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -35,6 +35,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" + prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -683,6 +684,7 @@ func newBasicScrapeLoop(t testing.TB, ctx context.Context, scraper scraper, app false, newTestScrapeMetrics(t), false, + model.LegacyValidation, ) } @@ -825,6 +827,7 @@ func TestScrapeLoopRun(t *testing.T) { false, scrapeMetrics, false, + model.LegacyValidation, ) // The loop must terminate during the initial offset if the context @@ -969,6 +972,7 @@ func TestScrapeLoopMetadata(t *testing.T) { false, scrapeMetrics, false, + model.LegacyValidation, ) defer cancel() @@ -1064,6 +1068,40 @@ func TestScrapeLoopFailWithInvalidLabelsAfterRelabel(t *testing.T) { require.Equal(t, 0, seriesAdded) } +func TestScrapeLoopFailLegacyUnderUTF8(t *testing.T) { + // Test that scrapes fail when default validation is utf8 but scrape config is + // legacy. + model.NameValidationScheme = model.UTF8Validation + defer func() { + model.NameValidationScheme = model.LegacyValidation + }() + s := teststorage.New(t) + defer s.Close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sl := newBasicScrapeLoop(t, ctx, &testScraper{}, s.Appender, 0) + sl.validationScheme = model.LegacyValidation + + slApp := sl.appender(ctx) + total, added, seriesAdded, err := sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{}) + require.ErrorContains(t, err, "invalid metric name or label names") + require.NoError(t, slApp.Rollback()) + require.Equal(t, 1, total) + require.Equal(t, 0, added) + require.Equal(t, 0, seriesAdded) + + // When scrapeloop has validation set to UTF-8, the metric is allowed. 
+ sl.validationScheme = model.UTF8Validation + + slApp = sl.appender(ctx) + total, added, seriesAdded, err = sl.append(slApp, []byte("{\"test.metric\"} 1\n"), "", time.Time{}) + require.NoError(t, err) + require.Equal(t, 1, total) + require.Equal(t, 1, added) + require.Equal(t, 1, seriesAdded) +} + func makeTestMetrics(n int) []byte { // Construct a metrics string to parse sb := bytes.Buffer{} @@ -2339,11 +2377,15 @@ func TestTargetScraperScrapeOK(t *testing.T) { ) var protobufParsing bool + var allowUTF8 bool server := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + accept := r.Header.Get("Accept") + if allowUTF8 { + require.Truef(t, strings.Contains(accept, "escaping=allow-utf-8"), "Expected Accept header to allow utf8, got %q", accept) + } if protobufParsing { - accept := r.Header.Get("Accept") require.True(t, strings.HasPrefix(accept, "application/vnd.google.protobuf;"), "Expected Accept header to prefer application/vnd.google.protobuf.") } @@ -2351,7 +2393,11 @@ func TestTargetScraperScrapeOK(t *testing.T) { timeout := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds") require.Equal(t, expectedTimeout, timeout, "Expected scrape timeout header.") - w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + if allowUTF8 { + w.Header().Set("Content-Type", `text/plain; version=1.0.0; escaping=allow-utf-8`) + } else { + w.Header().Set("Content-Type", `text/plain; version=0.0.4`) + } w.Write([]byte("metric_a 1\nmetric_b 2\n")) }), ) @@ -2380,13 +2426,22 @@ func TestTargetScraperScrapeOK(t *testing.T) { require.NoError(t, err) contentType, err := ts.readResponse(context.Background(), resp, &buf) require.NoError(t, err) - require.Equal(t, "text/plain; version=0.0.4", contentType) + if allowUTF8 { + require.Equal(t, "text/plain; version=1.0.0; escaping=allow-utf-8", contentType) + } else { + require.Equal(t, "text/plain; version=0.0.4", contentType) + } require.Equal(t, "metric_a 1\nmetric_b 2\n", buf.String()) } - runTest(acceptHeader(config.DefaultScrapeProtocols)) + runTest(acceptHeader(config.DefaultScrapeProtocols, model.LegacyValidation)) protobufParsing = true - runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols)) + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.LegacyValidation)) + protobufParsing = false + allowUTF8 = true + runTest(acceptHeader(config.DefaultScrapeProtocols, model.UTF8Validation)) + protobufParsing = true + runTest(acceptHeader(config.DefaultProtoFirstScrapeProtocols, model.UTF8Validation)) } func TestTargetScrapeScrapeCancel(t *testing.T) { @@ -2412,7 +2467,7 @@ func TestTargetScrapeScrapeCancel(t *testing.T) { ), }, client: http.DefaultClient, - acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), } ctx, cancel := context.WithCancel(context.Background()) @@ -2467,7 +2522,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) { ), }, client: http.DefaultClient, - acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), } resp, err := ts.scrape(context.Background()) @@ -2511,7 +2566,7 @@ func TestTargetScraperBodySizeLimit(t *testing.T) { }, client: http.DefaultClient, bodySizeLimit: bodySizeLimit, - acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols), + acceptHeader: acceptHeader(config.DefaultGlobalConfig.ScrapeProtocols, model.LegacyValidation), 
metrics: newTestScrapeMetrics(t), } var buf bytes.Buffer @@ -3627,6 +3682,7 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { require.Equal(t, 3, total) require.Equal(t, 3, added) require.Equal(t, 1, seriesAdded) + require.Equal(t, 2.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) slApp = sl.appender(ctx) total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{}) @@ -3635,12 +3691,18 @@ func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) { require.Equal(t, 3, total) require.Equal(t, 3, added) require.Equal(t, 0, seriesAdded) + require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) - metric := dto.Metric{} - err = sl.metrics.targetScrapeSampleDuplicate.Write(&metric) + // When different timestamps are supplied, multiple samples are accepted. + slApp = sl.appender(ctx) + total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1 1001\ntest_metric 1 1002\ntest_metric 1 1003\n"), "", time.Time{}) require.NoError(t, err) - value := metric.GetCounter().GetValue() - require.Equal(t, 4.0, value) + require.NoError(t, slApp.Commit()) + require.Equal(t, 3, total) + require.Equal(t, 3, added) + require.Equal(t, 0, seriesAdded) + // Metric is not higher than last time. + require.Equal(t, 4.0, prom_testutil.ToFloat64(sl.metrics.targetScrapeSampleDuplicate)) } // This tests running a full scrape loop and checking that the scrape option diff --git a/scrape/testdata/ca.cer b/scrape/testdata/ca.cer index 86f627a90..dbbd009d4 100644 --- a/scrape/testdata/ca.cer +++ b/scrape/testdata/ca.cer @@ -1,8 +1,66 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 93:6c:9e:29:8d:37:7b:66 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA + Validity + Not Before: Aug 20 11:51:23 2024 GMT + Not After : Dec 5 11:51:23 2044 GMT + Subject: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e9:52:05:4d:f2:5a:95:04:2d:b8:73:8b:3c:e7: + 47:48:67:00:be:dd:6c:41:f3:7c:4b:44:73:a3:77: + 3e:84:af:30:d7:2a:ad:45:6a:b7:89:23:05:15:b3: + aa:46:79:b8:95:64:cc:13:c4:44:a1:01:a0:e2:3d: + a5:67:2b:aa:d3:13:06:43:33:1c:96:36:12:9e:c6: + 1d:36:9b:d7:47:bd:28:2d:88:15:04:fa:14:a3:ff: + 8c:26:22:c5:a2:15:c7:76:b3:11:f6:a3:44:9a:28: + 0f:ca:fb:f4:51:a8:6a:05:94:7c:77:47:c8:21:56: + 25:bf:e2:2e:df:33:f3:e4:bd:d6:47:a5:49:13:12: + c8:1f:4c:d7:2a:56:a2:6c:c1:cf:55:05:5d:9a:75: + a2:23:4e:e6:8e:ff:76:05:d6:e0:c8:0b:51:f0:b6: + a1:b2:7d:8f:9c:6a:a5:ce:86:92:15:8c:5b:86:45: + c0:4a:ff:54:b8:ee:cf:11:bd:07:cb:4b:7d:0b:a1: + 9d:72:86:9f:55:bc:f9:6c:d9:55:60:96:30:3f:ec: + 2d:f6:5f:9a:32:9a:5a:5c:1c:5f:32:f9:d6:0f:04: + f8:81:08:04:9a:95:c3:9d:5a:30:8e:a5:0e:47:2f: + 00:ce:e0:2e:ad:5a:b8:b6:4c:55:7c:8a:59:22:b0: + ed:73 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B + X509v3 Authority Key Identifier: + CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B + X509v3 Basic Constraints: + CA:TRUE + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 4a:a1:b0:bc:c8:87:4f:7c:96:62:e5:09:29:ae:3a:2e:68:d0: + d2:c5:68:ed:ea:83:36:b1:86:f3:b9:e9:19:2b:b6:73:10:6f: + df:7f:bb:f1:76:81:03:c1:a1:5a:ee:6c:44:b8:7c:10:d1:5a: + d7:c1:92:64:59:35:a6:e0:aa:08:41:37:6e:e7:c8:b6:bd:0c: + 
4b:47:78:ec:c4:b4:15:a3:62:76:4a:39:8e:6e:19:ff:f0:c0: + 8a:7e:1c:cd:87:e5:00:6c:f1:ce:27:26:ff:b8:e9:eb:f7:2f: + bd:c2:4b:9c:d6:57:de:74:74:b3:4f:03:98:9a:b5:08:2d:16: + ca:7f:b6:c8:76:62:86:1b:7c:f2:3e:6c:78:cc:2c:95:9a:bb: + 77:25:e8:80:ff:9b:e8:f8:9a:85:3b:85:b7:17:4e:77:a1:cf: + 4d:b9:d0:25:e8:5d:8c:e6:7c:f1:d9:52:30:3d:ec:2b:37:91: + bc:e2:e8:39:31:6f:3d:e9:98:70:80:7c:41:dd:19:13:05:21: + 94:7b:16:cf:d8:ee:4e:38:34:5e:6a:ff:cd:85:ac:8f:94:9a: + dd:4e:77:05:13:a6:b4:80:52:b2:97:64:76:88:f4:dd:42:0a: + 50:1c:80:fd:4b:6e:a9:62:10:aa:ef:2e:c1:2f:be:0e:c2:2e: + b5:28:5f:83 -----BEGIN CERTIFICATE----- MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg -Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4 -MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4 +MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq @@ -12,11 +70,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/ VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1 BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL -rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu -e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1 -0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k -pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH -U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx -j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU -mM5lH/s= +rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl +CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ +NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3 +L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7 +hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y +7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C +LrUoX4M= -----END CERTIFICATE----- diff --git a/scripts/golangci-lint.yml b/scripts/golangci-lint.yml index 83ae3906c..f4a7385bb 100644 --- a/scripts/golangci-lint.yml +++ b/scripts/golangci-lint.yml @@ -26,14 +26,14 @@ jobs: - name: Checkout repository uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install Go - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: 1.22.x - name: Install snmp_exporter/generator dependencies run: sudo apt-get update && sudo apt-get -y install libsnmp-dev if: github.repository == 'prometheus/snmp_exporter' - name: Lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: args: --verbose - version: v1.59.1 + version: v1.60.2 diff --git a/scripts/sync_repo_files.sh b/scripts/sync_repo_files.sh index 6459fb1e7..102933629 100755 --- a/scripts/sync_repo_files.sh +++ b/scripts/sync_repo_files.sh @@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then fi # List of files that should be synced. 
-SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml" +SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml .github/workflows/stale.yml" # Go to the root of the repo cd "$(git rev-parse --show-cdup)" || exit 1 diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 1b24e5da2..b5c6443ac 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -96,10 +96,10 @@ func TestSampleRingMixed(t *testing.T) { // With ValNone as the preferred type, nothing should be initialized. r := newSampleRing(10, 2, chunkenc.ValNone) - require.Zero(t, len(r.fBuf)) - require.Zero(t, len(r.hBuf)) - require.Zero(t, len(r.fhBuf)) - require.Zero(t, len(r.iBuf)) + require.Empty(t, r.fBuf) + require.Empty(t, r.hBuf) + require.Empty(t, r.fhBuf) + require.Empty(t, r.iBuf) // But then mixed adds should work as expected. r.addF(fSample{t: 1, f: 3.14}) @@ -146,10 +146,10 @@ func TestSampleRingAtFloatHistogram(t *testing.T) { // With ValNone as the preferred type, nothing should be initialized. r := newSampleRing(10, 2, chunkenc.ValNone) - require.Zero(t, len(r.fBuf)) - require.Zero(t, len(r.hBuf)) - require.Zero(t, len(r.fhBuf)) - require.Zero(t, len(r.iBuf)) + require.Empty(t, r.fBuf) + require.Empty(t, r.hBuf) + require.Empty(t, r.fhBuf) + require.Empty(t, r.iBuf) var ( h *histogram.Histogram diff --git a/storage/errors.go b/storage/errors.go index eff70f678..dd48066db 100644 --- a/storage/errors.go +++ b/storage/errors.go @@ -16,9 +16,10 @@ package storage import "fmt" type errDuplicateSampleForTimestamp struct { - timestamp int64 - existing float64 - newValue float64 + timestamp int64 + existing float64 + existingIsHistogram bool + newValue float64 } func NewDuplicateFloatErr(t int64, existing, newValue float64) error { @@ -29,13 +30,26 @@ func NewDuplicateFloatErr(t int64, existing, newValue float64) error { } } +// NewDuplicateHistogramToFloatErr describes an error where a new float sample is sent for same timestamp as previous histogram. +func NewDuplicateHistogramToFloatErr(t int64, newValue float64) error { + return errDuplicateSampleForTimestamp{ + timestamp: t, + existingIsHistogram: true, + newValue: newValue, + } +} + func (e errDuplicateSampleForTimestamp) Error() string { if e.timestamp == 0 { return "duplicate sample for timestamp" } + if e.existingIsHistogram { + return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing is a histogram, new value %g", e.timestamp, e.newValue) + } return fmt.Sprintf("duplicate sample for timestamp %d; overrides not allowed: existing %g, new value %g", e.timestamp, e.existing, e.newValue) } +// Is implements the anonymous interface checked by errors.Is. // Every errDuplicateSampleForTimestamp compares equal to the global ErrDuplicateSampleForTimestamp. func (e errDuplicateSampleForTimestamp) Is(t error) bool { if t == ErrDuplicateSampleForTimestamp { diff --git a/storage/errors_test.go b/storage/errors_test.go new file mode 100644 index 000000000..b3e202b49 --- /dev/null +++ b/storage/errors_test.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestErrDuplicateSampleForTimestamp(t *testing.T) { + // All errDuplicateSampleForTimestamp are ErrDuplicateSampleForTimestamp + require.ErrorIs(t, ErrDuplicateSampleForTimestamp, errDuplicateSampleForTimestamp{}) + + // Errors of the same type compare equal only if they have the same properties. + err := NewDuplicateFloatErr(1_000, 10, 20) + sameErr := NewDuplicateFloatErr(1_000, 10, 20) + differentErr := NewDuplicateFloatErr(1_001, 30, 40) + + require.ErrorIs(t, err, sameErr) + require.NotErrorIs(t, err, differentErr) + + // Also works when err is wrapped. + require.ErrorIs(t, fmt.Errorf("failed: %w", err), sameErr) + require.NotErrorIs(t, fmt.Errorf("failed: %w", err), differentErr) +} diff --git a/storage/interface.go b/storage/interface.go index f85f985e9..2f125e590 100644 --- a/storage/interface.go +++ b/storage/interface.go @@ -227,9 +227,9 @@ type LabelHints struct { Limit int } -// TODO(bwplotka): Move to promql/engine_test.go? // QueryableFunc is an adapter to allow the use of ordinary functions as // Queryables. It follows the idea of http.HandlerFunc. +// TODO(bwplotka): Move to promql/engine_test.go? type QueryableFunc func(mint, maxt int64) (Querier, error) // Querier calls f() with the given parameters. diff --git a/storage/interface_test.go b/storage/interface_test.go new file mode 100644 index 000000000..ba6072173 --- /dev/null +++ b/storage/interface_test.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" +) + +func TestMockSeries(t *testing.T) { + s := storage.MockSeries([]int64{1, 2, 3}, []float64{1, 2, 3}, []string{"__name__", "foo"}) + it := s.Iterator(nil) + ts := []int64{} + vs := []float64{} + for it.Next() == chunkenc.ValFloat { + t, v := it.At() + ts = append(ts, t) + vs = append(vs, v) + } + require.Equal(t, []int64{1, 2, 3}, ts) + require.Equal(t, []float64{1, 2, 3}, vs) +} diff --git a/storage/remote/azuread/azuread.go b/storage/remote/azuread/azuread.go index 58520c6a5..82f46b82f 100644 --- a/storage/remote/azuread/azuread.go +++ b/storage/remote/azuread/azuread.go @@ -31,13 +31,15 @@ import ( "github.com/google/uuid" ) +// Clouds. const ( - // Clouds. AzureChina = "AzureChina" AzureGovernment = "AzureGovernment" AzurePublic = "AzurePublic" +) - // Audiences. +// Audiences. 
+const ( IngestionChinaAudience = "https://monitor.azure.cn//.default" IngestionGovernmentAudience = "https://monitor.azure.us//.default" IngestionPublicAudience = "https://monitor.azure.com//.default" diff --git a/storage/remote/chunked.go b/storage/remote/chunked.go index 96ce483e0..aa5addd6a 100644 --- a/storage/remote/chunked.go +++ b/storage/remote/chunked.go @@ -26,10 +26,6 @@ import ( "github.com/gogo/protobuf/proto" ) -// DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows. -// 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset. -const DefaultChunkedReadLimit = 5e+7 - // The table gets initialized with sync.Once but may still cause a race // with any other use of the crc32 package anywhere. Thus we initialize it // before. diff --git a/storage/remote/client.go b/storage/remote/client.go index 11e423b6a..62218cfba 100644 --- a/storage/remote/client.go +++ b/storage/remote/client.go @@ -16,6 +16,7 @@ package remote import ( "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -36,13 +37,14 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote/azuread" "github.com/prometheus/prometheus/storage/remote/googleiam" ) -const maxErrMsgLen = 1024 - const ( + maxErrMsgLen = 1024 + RemoteWriteVersionHeader = "X-Prometheus-Remote-Write-Version" RemoteWriteVersion1HeaderValue = "0.1.0" RemoteWriteVersion20HeaderValue = "2.0.0" @@ -68,9 +70,12 @@ var ( config.RemoteWriteProtoMsgV1: appProtoContentType, // Also application/x-protobuf;proto=prometheus.WriteRequest but simplified for compatibility with 1.x spec. config.RemoteWriteProtoMsgV2: appProtoContentType + ";proto=io.prometheus.write.v2.Request", } -) -var ( + AcceptedResponseTypes = []prompb.ReadRequest_ResponseType{ + prompb.ReadRequest_STREAMED_XOR_CHUNKS, + prompb.ReadRequest_SAMPLES, + } + remoteReadQueriesTotal = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: namespace, @@ -78,7 +83,7 @@ var ( Name: "read_queries_total", Help: "The total number of remote read queries.", }, - []string{remoteName, endpoint, "code"}, + []string{remoteName, endpoint, "response_type", "code"}, ) remoteReadQueries = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -94,13 +99,13 @@ var ( Namespace: namespace, Subsystem: subsystem, Name: "read_request_duration_seconds", - Help: "Histogram of the latency for remote read requests.", + Help: "Histogram of the latency for remote read requests. Note that for streamed responses this is only the duration of the initial call and does not include the processing of the stream.", Buckets: append(prometheus.DefBuckets, 25, 60), NativeHistogramBucketFactor: 1.1, NativeHistogramMaxBucketNumber: 100, NativeHistogramMinResetDuration: 1 * time.Hour, }, - []string{remoteName, endpoint}, + []string{remoteName, endpoint, "response_type"}, ) ) @@ -116,10 +121,11 @@ type Client struct { timeout time.Duration retryOnRateLimit bool + chunkedReadLimit uint64 readQueries prometheus.Gauge readQueriesTotal *prometheus.CounterVec - readQueriesDuration prometheus.Observer + readQueriesDuration prometheus.ObserverVec writeProtoMsg config.RemoteWriteProtoMsg writeCompression Compression // Not exposed by ClientConfig for now. 
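The metric changes above add a `response_type` label and turn `readQueriesDuration` into a curried `prometheus.ObserverVec`: per-client labels are bound once, per-request labels are filled in at observation time. A runnable sketch of that currying pattern, with metric and label names that are illustrative only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	queriesTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "example_read_queries_total",
		Help: "Total remote read queries.",
	}, []string{"remote_name", "url", "response_type", "code"})

	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "example_read_request_duration_seconds",
		Help: "Latency of remote read requests.",
	}, []string{"remote_name", "url", "response_type"})

	// Bind the static per-client labels once, as the diff does with
	// MustCurryWith for remote name and endpoint.
	fixed := prometheus.Labels{"remote_name": "demo", "url": "http://localhost:9090"}
	perClientTotal := queriesTotal.MustCurryWith(fixed)
	perClientDuration := duration.MustCurryWith(fixed) // a prometheus.ObserverVec

	// At request time only the dynamic labels remain.
	start := time.Now()
	perClientTotal.WithLabelValues("chunked", "200").Inc()
	perClientDuration.WithLabelValues("chunked").Observe(time.Since(start).Seconds())

	fmt.Println("recorded one chunked remote-read request")
}
```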
@@ -136,12 +142,13 @@ type ClientConfig struct { Headers map[string]string RetryOnRateLimit bool WriteProtoMsg config.RemoteWriteProtoMsg + ChunkedReadLimit uint64 } -// ReadClient uses the SAMPLES method of remote read to read series samples from remote server. -// TODO(bwplotka): Add streamed chunked remote read method as well (https://github.com/prometheus/prometheus/issues/5926). +// ReadClient will request the STREAMED_XOR_CHUNKS method of remote read but can +// also fall back to the SAMPLES method if necessary. type ReadClient interface { - Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) + Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) } // NewReadClient creates a new client for remote read. @@ -162,9 +169,10 @@ func NewReadClient(name string, conf *ClientConfig) (ReadClient, error) { urlString: conf.URL.String(), Client: httpClient, timeout: time.Duration(conf.Timeout), + chunkedReadLimit: conf.ChunkedReadLimit, readQueries: remoteReadQueries.WithLabelValues(name, conf.URL.String()), readQueriesTotal: remoteReadQueriesTotal.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}), - readQueriesDuration: remoteReadQueryDuration.WithLabelValues(name, conf.URL.String()), + readQueriesDuration: remoteReadQueryDuration.MustCurryWith(prometheus.Labels{remoteName: name, endpoint: conf.URL.String()}), }, nil } @@ -278,8 +286,8 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo return WriteResponseStats{}, RecoverableError{err, defaultBackoff} } defer func() { - io.Copy(io.Discard, httpResp.Body) - httpResp.Body.Close() + _, _ = io.Copy(io.Discard, httpResp.Body) + _ = httpResp.Body.Close() }() // TODO(bwplotka): Pass logger and emit debug on error? @@ -287,7 +295,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo // we can continue handling. rs, _ := ParseWriteResponseStats(httpResp) - //nolint:usestdlibvars if httpResp.StatusCode/100 == 2 { return rs, nil } @@ -297,7 +304,6 @@ func (c *Client) Store(ctx context.Context, req []byte, attempt int) (WriteRespo body, _ := io.ReadAll(io.LimitReader(httpResp.Body, maxErrMsgLen)) err = fmt.Errorf("server returned HTTP status %s: %s", httpResp.Status, body) - //nolint:usestdlibvars if httpResp.StatusCode/100 == 5 || (c.retryOnRateLimit && httpResp.StatusCode == http.StatusTooManyRequests) { return rs, RecoverableError{err, retryAfterDuration(httpResp.Header.Get("Retry-After"))} @@ -331,17 +337,17 @@ func (c *Client) Endpoint() string { return c.urlString } -// Read reads from a remote endpoint. -func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error) { +// Read reads from a remote endpoint. The sortSeries parameter is only respected in the case of a sampled response; +// chunked responses arrive already sorted by the server. +func (c *Client) Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) { c.readQueries.Inc() defer c.readQueries.Dec() req := &prompb.ReadRequest{ // TODO: Support batching multiple queries into one read request, // as the protobuf interface allows for it. 
- Queries: []*prompb.Query{ - query, - }, + Queries: []*prompb.Query{query}, + AcceptedResponseTypes: AcceptedResponseTypes, } data, err := proto.Marshal(req) if err != nil { @@ -360,7 +366,6 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe httpReq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") ctx, cancel := context.WithTimeout(ctx, c.timeout) - defer cancel() ctx, span := otel.Tracer("").Start(ctx, "Remote Read", trace.WithSpanKind(trace.SpanKindClient)) defer span.End() @@ -368,24 +373,57 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe start := time.Now() httpResp, err := c.Client.Do(httpReq.WithContext(ctx)) if err != nil { + cancel() return nil, fmt.Errorf("error sending request: %w", err) } - defer func() { - io.Copy(io.Discard, httpResp.Body) - httpResp.Body.Close() - }() - c.readQueriesDuration.Observe(time.Since(start).Seconds()) - c.readQueriesTotal.WithLabelValues(strconv.Itoa(httpResp.StatusCode)).Inc() - compressed, err = io.ReadAll(httpResp.Body) + if httpResp.StatusCode/100 != 2 { + // Make an attempt at getting an error message. + body, _ := io.ReadAll(httpResp.Body) + _ = httpResp.Body.Close() + + cancel() + return nil, fmt.Errorf("remote server %s returned http status %s: %s", c.urlString, httpResp.Status, string(body)) + } + + contentType := httpResp.Header.Get("Content-Type") + + switch { + case strings.HasPrefix(contentType, "application/x-protobuf"): + c.readQueriesDuration.WithLabelValues("sampled").Observe(time.Since(start).Seconds()) + c.readQueriesTotal.WithLabelValues("sampled", strconv.Itoa(httpResp.StatusCode)).Inc() + ss, err := c.handleSampledResponse(req, httpResp, sortSeries) + cancel() + return ss, err + case strings.HasPrefix(contentType, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"): + c.readQueriesDuration.WithLabelValues("chunked").Observe(time.Since(start).Seconds()) + + s := NewChunkedReader(httpResp.Body, c.chunkedReadLimit, nil) + return NewChunkedSeriesSet(s, httpResp.Body, query.StartTimestampMs, query.EndTimestampMs, func(err error) { + code := strconv.Itoa(httpResp.StatusCode) + if !errors.Is(err, io.EOF) { + code = "aborted_stream" + } + c.readQueriesTotal.WithLabelValues("chunked", code).Inc() + cancel() + }), nil + default: + c.readQueriesDuration.WithLabelValues("unsupported").Observe(time.Since(start).Seconds()) + c.readQueriesTotal.WithLabelValues("unsupported", strconv.Itoa(httpResp.StatusCode)).Inc() + cancel() + return nil, fmt.Errorf("unsupported content type: %s", contentType) + } +} + +func (c *Client) handleSampledResponse(req *prompb.ReadRequest, httpResp *http.Response, sortSeries bool) (storage.SeriesSet, error) { + compressed, err := io.ReadAll(httpResp.Body) if err != nil { return nil, fmt.Errorf("error reading response. 
HTTP status code: %s: %w", httpResp.Status, err) } - - //nolint:usestdlibvars - if httpResp.StatusCode/100 != 2 { - return nil, fmt.Errorf("remote server %s returned HTTP status %s: %s", c.urlString, httpResp.Status, strings.TrimSpace(string(compressed))) - } + defer func() { + _, _ = io.Copy(io.Discard, httpResp.Body) + _ = httpResp.Body.Close() + }() uncompressed, err := snappy.Decode(nil, compressed) if err != nil { @@ -402,5 +440,8 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe return nil, fmt.Errorf("responses: want %d, got %d", len(req.Queries), len(resp.Results)) } - return resp.Results[0], nil + // This client does not batch queries so there's always only 1 result. + res := resp.Results[0] + + return FromQueryResult(sortSeries, res), nil } diff --git a/storage/remote/client_test.go b/storage/remote/client_test.go index 9184ce100..c8b3d487e 100644 --- a/storage/remote/client_test.go +++ b/storage/remote/client_test.go @@ -23,9 +23,15 @@ import ( "testing" "time" + "github.com/gogo/protobuf/proto" + "github.com/golang/snappy" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) var longErrMessage = strings.Repeat("error message", maxErrMsgLen) @@ -208,3 +214,226 @@ func TestClientCustomHeaders(t *testing.T) { require.True(t, called, "The remote server wasn't called") } + +func TestReadClient(t *testing.T) { + tests := []struct { + name string + query *prompb.Query + httpHandler http.HandlerFunc + expectedLabels []map[string]string + expectedSamples [][]model.SamplePair + expectedErrorContains string + sortSeries bool + }{ + { + name: "sorted sampled response", + httpHandler: sampledResponseHTTPHandler(t), + expectedLabels: []map[string]string{ + {"foo1": "bar"}, + {"foo2": "bar"}, + }, + expectedSamples: [][]model.SamplePair{ + { + {Timestamp: model.Time(0), Value: model.SampleValue(3)}, + {Timestamp: model.Time(5), Value: model.SampleValue(4)}, + }, + { + {Timestamp: model.Time(0), Value: model.SampleValue(1)}, + {Timestamp: model.Time(5), Value: model.SampleValue(2)}, + }, + }, + expectedErrorContains: "", + sortSeries: true, + }, + { + name: "unsorted sampled response", + httpHandler: sampledResponseHTTPHandler(t), + expectedLabels: []map[string]string{ + {"foo2": "bar"}, + {"foo1": "bar"}, + }, + expectedSamples: [][]model.SamplePair{ + { + {Timestamp: model.Time(0), Value: model.SampleValue(1)}, + {Timestamp: model.Time(5), Value: model.SampleValue(2)}, + }, + { + {Timestamp: model.Time(0), Value: model.SampleValue(3)}, + {Timestamp: model.Time(5), Value: model.SampleValue(4)}, + }, + }, + expectedErrorContains: "", + sortSeries: false, + }, + { + name: "chunked response", + query: &prompb.Query{ + StartTimestampMs: 4000, + EndTimestampMs: 12000, + }, + httpHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") + + flusher, ok := w.(http.Flusher) + require.True(t, ok) + + cw := NewChunkedWriter(w, flusher) + l := []prompb.Label{ + {Name: "foo", Value: "bar"}, + } + + chunks := buildTestChunks(t) + for i, c := range chunks { + cSeries := prompb.ChunkedSeries{Labels: l, Chunks: []prompb.Chunk{c}} + readResp := prompb.ChunkedReadResponse{ + ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, + QueryIndex: int64(i), 
+ } + + b, err := proto.Marshal(&readResp) + require.NoError(t, err) + + _, err = cw.Write(b) + require.NoError(t, err) + } + }), + expectedLabels: []map[string]string{ + {"foo": "bar"}, + {"foo": "bar"}, + {"foo": "bar"}, + }, + // This is the output of buildTestChunks minus the samples outside the query range. + expectedSamples: [][]model.SamplePair{ + { + {Timestamp: model.Time(4000), Value: model.SampleValue(4)}, + }, + { + {Timestamp: model.Time(5000), Value: model.SampleValue(1)}, + {Timestamp: model.Time(6000), Value: model.SampleValue(2)}, + {Timestamp: model.Time(7000), Value: model.SampleValue(3)}, + {Timestamp: model.Time(8000), Value: model.SampleValue(4)}, + {Timestamp: model.Time(9000), Value: model.SampleValue(5)}, + }, + { + {Timestamp: model.Time(10000), Value: model.SampleValue(2)}, + {Timestamp: model.Time(11000), Value: model.SampleValue(3)}, + {Timestamp: model.Time(12000), Value: model.SampleValue(4)}, + }, + }, + expectedErrorContains: "", + }, + { + name: "unsupported content type", + httpHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "foobar") + }), + expectedErrorContains: "unsupported content type", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + server := httptest.NewServer(test.httpHandler) + defer server.Close() + + u, err := url.Parse(server.URL) + require.NoError(t, err) + + conf := &ClientConfig{ + URL: &config_util.URL{URL: u}, + Timeout: model.Duration(5 * time.Second), + ChunkedReadLimit: config.DefaultChunkedReadLimit, + } + c, err := NewReadClient("test", conf) + require.NoError(t, err) + + query := &prompb.Query{} + if test.query != nil { + query = test.query + } + + ss, err := c.Read(context.Background(), query, test.sortSeries) + if test.expectedErrorContains != "" { + require.ErrorContains(t, err, test.expectedErrorContains) + return + } + + require.NoError(t, err) + + i := 0 + + for ss.Next() { + require.NoError(t, ss.Err()) + s := ss.At() + + l := s.Labels() + require.Len(t, test.expectedLabels[i], l.Len()) + for k, v := range test.expectedLabels[i] { + require.True(t, l.Has(k)) + require.Equal(t, v, l.Get(k)) + } + + it := s.Iterator(nil) + j := 0 + + for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() { + require.NoError(t, it.Err()) + + ts, v := it.At() + expectedSample := test.expectedSamples[i][j] + + require.Equal(t, int64(expectedSample.Timestamp), ts) + require.Equal(t, float64(expectedSample.Value), v) + + j++ + } + + require.Len(t, test.expectedSamples[i], j) + + i++ + } + + require.NoError(t, ss.Err()) + }) + } +} + +func sampledResponseHTTPHandler(t *testing.T) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/x-protobuf") + + resp := prompb.ReadResponse{ + Results: []*prompb.QueryResult{ + { + Timeseries: []*prompb.TimeSeries{ + { + Labels: []prompb.Label{ + {Name: "foo2", Value: "bar"}, + }, + Samples: []prompb.Sample{ + {Value: float64(1), Timestamp: int64(0)}, + {Value: float64(2), Timestamp: int64(5)}, + }, + Exemplars: []prompb.Exemplar{}, + }, + { + Labels: []prompb.Label{ + {Name: "foo1", Value: "bar"}, + }, + Samples: []prompb.Sample{ + {Value: float64(3), Timestamp: int64(0)}, + {Value: float64(4), Timestamp: int64(5)}, + }, + Exemplars: []prompb.Exemplar{}, + }, + }, + }, + }, + } + b, err := proto.Marshal(&resp) + require.NoError(t, err) + + _, err = w.Write(snappy.Encode(nil, b)) + require.NoError(t, err) + } +} diff --git 
a/storage/remote/codec.go b/storage/remote/codec.go index c9220ca42..80bb81150 100644 --- a/storage/remote/codec.go +++ b/storage/remote/codec.go @@ -540,6 +540,220 @@ func (c *concreteSeriesIterator) Err() error { return nil } +// chunkedSeriesSet implements storage.SeriesSet. +type chunkedSeriesSet struct { + chunkedReader *ChunkedReader + respBody io.ReadCloser + mint, maxt int64 + cancel func(error) + + current storage.Series + err error +} + +// NewChunkedSeriesSet returns a storage.SeriesSet that streams series from a chunked remote read response. The response body is closed, and cancel is called, once the stream ends or reading fails. +func NewChunkedSeriesSet(chunkedReader *ChunkedReader, respBody io.ReadCloser, mint, maxt int64, cancel func(error)) storage.SeriesSet { + return &chunkedSeriesSet{ + chunkedReader: chunkedReader, + respBody: respBody, + mint: mint, + maxt: maxt, + cancel: cancel, + } +} + +// Next returns true if there is a next series and false otherwise. It will +// block until the next series is available. +func (s *chunkedSeriesSet) Next() bool { + res := &prompb.ChunkedReadResponse{} + + err := s.chunkedReader.NextProto(res) + if err != nil { + if !errors.Is(err, io.EOF) { + s.err = err + _, _ = io.Copy(io.Discard, s.respBody) + } + + _ = s.respBody.Close() + s.cancel(err) + + return false + } + + s.current = &chunkedSeries{ + ChunkedSeries: prompb.ChunkedSeries{ + Labels: res.ChunkedSeries[0].Labels, + Chunks: res.ChunkedSeries[0].Chunks, + }, + mint: s.mint, + maxt: s.maxt, + } + + return true +} + +func (s *chunkedSeriesSet) At() storage.Series { + return s.current +} + +func (s *chunkedSeriesSet) Err() error { + return s.err +} + +func (s *chunkedSeriesSet) Warnings() annotations.Annotations { + return nil +} + +type chunkedSeries struct { + prompb.ChunkedSeries + mint, maxt int64 +} + +var _ storage.Series = &chunkedSeries{} + +func (s *chunkedSeries) Labels() labels.Labels { + b := labels.NewScratchBuilder(0) + return s.ToLabels(&b, nil) +} + +func (s *chunkedSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { + csIt, ok := it.(*chunkedSeriesIterator) + if ok { + csIt.reset(s.Chunks, s.mint, s.maxt) + return csIt + } + return newChunkedSeriesIterator(s.Chunks, s.mint, s.maxt) +} + +type chunkedSeriesIterator struct { + chunks []prompb.Chunk + idx int + cur chunkenc.Iterator + valType chunkenc.ValueType + mint, maxt int64 + + err error +} + +var _ chunkenc.Iterator = &chunkedSeriesIterator{} + +func newChunkedSeriesIterator(chunks []prompb.Chunk, mint, maxt int64) *chunkedSeriesIterator { + it := &chunkedSeriesIterator{} + it.reset(chunks, mint, maxt) + return it +} + +func (it *chunkedSeriesIterator) Next() chunkenc.ValueType { + if it.err != nil { + return chunkenc.ValNone + } + if len(it.chunks) == 0 { + return chunkenc.ValNone + } + + for it.valType = it.cur.Next(); it.valType != chunkenc.ValNone; it.valType = it.cur.Next() { + atT := it.AtT() + if atT > it.maxt { + it.chunks = nil // Exhaust this iterator so follow-up calls to Next or Seek return fast.
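+ // Samples within a series arrive in time order, so once one sample is past maxt every remaining sample is too.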
+ return chunkenc.ValNone + } + if atT >= it.mint { + return it.valType + } + } + + if it.idx >= len(it.chunks)-1 { + it.valType = chunkenc.ValNone + } else { + it.idx++ + it.resetIterator() + it.valType = it.Next() + } + + return it.valType +} + +func (it *chunkedSeriesIterator) Seek(t int64) chunkenc.ValueType { + if it.err != nil { + return chunkenc.ValNone + } + if len(it.chunks) == 0 { + return chunkenc.ValNone + } + + startIdx := it.idx + it.idx += sort.Search(len(it.chunks)-startIdx, func(i int) bool { + return it.chunks[startIdx+i].MaxTimeMs >= t + }) + if it.idx > startIdx { + it.resetIterator() + } else { + ts := it.cur.AtT() + if ts >= t { + return it.valType + } + } + + for it.valType = it.cur.Next(); it.valType != chunkenc.ValNone; it.valType = it.cur.Next() { + ts := it.cur.AtT() + if ts > it.maxt { + it.chunks = nil // Exhaust this iterator so follow-up calls to Next or Seek return fast. + return chunkenc.ValNone + } + if ts >= t && ts >= it.mint { + return it.valType + } + } + + it.valType = chunkenc.ValNone + return it.valType +} + +func (it *chunkedSeriesIterator) resetIterator() { + if it.idx < len(it.chunks) { + chunk := it.chunks[it.idx] + + decodedChunk, err := chunkenc.FromData(chunkenc.Encoding(chunk.Type), chunk.Data) + if err != nil { + it.err = err + return + } + + it.cur = decodedChunk.Iterator(nil) + } else { + it.cur = chunkenc.NewNopIterator() + } +} + +func (it *chunkedSeriesIterator) reset(chunks []prompb.Chunk, mint, maxt int64) { + it.chunks = chunks + it.mint = mint + it.maxt = maxt + it.idx = 0 + if len(chunks) > 0 { + it.resetIterator() + } +} + +func (it *chunkedSeriesIterator) At() (ts int64, v float64) { + return it.cur.At() +} + +func (it *chunkedSeriesIterator) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) { + return it.cur.AtHistogram(h) +} + +func (it *chunkedSeriesIterator) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) { + return it.cur.AtFloatHistogram(fh) +} + +func (it *chunkedSeriesIterator) AtT() int64 { + return it.cur.AtT() +} + +func (it *chunkedSeriesIterator) Err() error { + return it.err +} + // validateLabelsAndMetricName validates the label names/values and metric names returned from remote read, // also making sure that there are no labels with duplicate names. func validateLabelsAndMetricName(ls []prompb.Label) error { @@ -612,15 +826,6 @@ func FromLabelMatchers(matchers []*prompb.LabelMatcher) ([]*labels.Matcher, erro return result, nil } -// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric. -func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { - metric := make(model.Metric, len(labelPairs)) - for _, l := range labelPairs { - metric[model.LabelName(l.Name)] = model.LabelValue(l.Value) - } - return metric -} - // DecodeWriteRequest from an io.Reader into a prompb.WriteRequest, handling // snappy decompression. // Used also by documentation/examples/remote_storage. 
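The following sketch is not part of the change; it illustrates how the storage.SeriesSet built by NewChunkedSeriesSet above is consumed, using only the Next/At/Iterator protocol that the tests below exercise. The package and function names are illustrative.

package remoteexample

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// printSeriesSet walks a storage.SeriesSet: an outer Next/At loop over
// series and an inner chunkenc.Iterator loop over each series' samples.
// With a chunked remote read, each outer iteration blocks until the next
// streamed message arrives.
func printSeriesSet(ss storage.SeriesSet) error {
	for ss.Next() {
		s := ss.At()
		fmt.Println(s.Labels().String())

		it := s.Iterator(nil) // A previous iterator may be passed in for reuse.
		for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
			if vt == chunkenc.ValFloat {
				ts, v := it.At()
				fmt.Printf("  %d -> %g\n", ts, v)
			}
		}
		if err := it.Err(); err != nil {
			return err
		}
	}
	return ss.Err() // A mid-stream decode failure surfaces here.
}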
diff --git a/storage/remote/codec_test.go b/storage/remote/codec_test.go index 279d10e41..404f1add7 100644 --- a/storage/remote/codec_test.go +++ b/storage/remote/codec_test.go @@ -16,6 +16,7 @@ package remote import ( "bytes" "fmt" + "io" "sync" "testing" @@ -24,6 +25,7 @@ import ( "github.com/prometheus/common/model" "github.com/stretchr/testify/require" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/metadata" @@ -705,3 +707,270 @@ func (c *mockChunkIterator) Next() bool { func (c *mockChunkIterator) Err() error { return nil } + +func TestChunkedSeriesIterator(t *testing.T) { + t.Run("happy path", func(t *testing.T) { + chks := buildTestChunks(t) + + it := newChunkedSeriesIterator(chks, 2000, 12000) + + require.NoError(t, it.err) + require.NotNil(t, it.cur) + + // Initial next; advance to first valid sample of first chunk. + res := it.Next() + require.Equal(t, chunkenc.ValFloat, res) + require.NoError(t, it.Err()) + + ts, v := it.At() + require.Equal(t, int64(2000), ts) + require.Equal(t, float64(2), v) + + // Next to the second sample of the first chunk. + res = it.Next() + require.Equal(t, chunkenc.ValFloat, res) + require.NoError(t, it.Err()) + + ts, v = it.At() + require.Equal(t, int64(3000), ts) + require.Equal(t, float64(3), v) + + // Attempt to seek to the first sample of the first chunk (should return current sample). + res = it.Seek(0) + require.Equal(t, chunkenc.ValFloat, res) + + ts, v = it.At() + require.Equal(t, int64(3000), ts) + require.Equal(t, float64(3), v) + + // Seek to the end of the first chunk. + res = it.Seek(4000) + require.Equal(t, chunkenc.ValFloat, res) + + ts, v = it.At() + require.Equal(t, int64(4000), ts) + require.Equal(t, float64(4), v) + + // Next to the first sample of the second chunk. + res = it.Next() + require.Equal(t, chunkenc.ValFloat, res) + require.NoError(t, it.Err()) + + ts, v = it.At() + require.Equal(t, int64(5000), ts) + require.Equal(t, float64(1), v) + + // Seek to the second sample of the third chunk. + res = it.Seek(10999) + require.Equal(t, chunkenc.ValFloat, res) + require.NoError(t, it.Err()) + + ts, v = it.At() + require.Equal(t, int64(11000), ts) + require.Equal(t, float64(3), v) + + // Attempt to seek to something past the last sample (should return false and exhaust the iterator). + res = it.Seek(99999) + require.Equal(t, chunkenc.ValNone, res) + require.NoError(t, it.Err()) + + // Attempt to next past the last sample (should return false as the iterator is exhausted). + res = it.Next() + require.Equal(t, chunkenc.ValNone, res) + require.NoError(t, it.Err()) + }) + + t.Run("invalid chunk encoding error", func(t *testing.T) { + chks := buildTestChunks(t) + + // Set chunk type to an invalid value. 
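+ // (8 is not a registered chunkenc.Encoding; only XOR, histogram and
+ // float histogram chunks can be decoded, so chunkenc.FromData is
+ // expected to fail with "invalid chunk encoding".)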
+ chks[0].Type = 8 + + it := newChunkedSeriesIterator(chks, 0, 14000) + + res := it.Next() + require.Equal(t, chunkenc.ValNone, res) + + res = it.Seek(1000) + require.Equal(t, chunkenc.ValNone, res) + + require.ErrorContains(t, it.err, "invalid chunk encoding") + require.Nil(t, it.cur) + }) + + t.Run("empty chunks", func(t *testing.T) { + emptyChunks := make([]prompb.Chunk, 0) + + it1 := newChunkedSeriesIterator(emptyChunks, 0, 1000) + require.Equal(t, chunkenc.ValNone, it1.Next()) + require.Equal(t, chunkenc.ValNone, it1.Seek(1000)) + require.NoError(t, it1.Err()) + + var nilChunks []prompb.Chunk + + it2 := newChunkedSeriesIterator(nilChunks, 0, 1000) + require.Equal(t, chunkenc.ValNone, it2.Next()) + require.Equal(t, chunkenc.ValNone, it2.Seek(1000)) + require.NoError(t, it2.Err()) + }) +} + +func TestChunkedSeries(t *testing.T) { + t.Run("happy path", func(t *testing.T) { + chks := buildTestChunks(t) + + s := chunkedSeries{ + ChunkedSeries: prompb.ChunkedSeries{ + Labels: []prompb.Label{ + {Name: "foo", Value: "bar"}, + {Name: "asdf", Value: "zxcv"}, + }, + Chunks: chks, + }, + } + + require.Equal(t, labels.FromStrings("asdf", "zxcv", "foo", "bar"), s.Labels()) + + it := s.Iterator(nil) + res := it.Next() // Behavior is undefined w/o the initial call to Next. + + require.Equal(t, chunkenc.ValFloat, res) + require.NoError(t, it.Err()) + + ts, v := it.At() + require.Equal(t, int64(0), ts) + require.Equal(t, float64(0), v) + }) +} + +func TestChunkedSeriesSet(t *testing.T) { + t.Run("happy path", func(t *testing.T) { + buf := &bytes.Buffer{} + flusher := &mockFlusher{} + + w := NewChunkedWriter(buf, flusher) + r := NewChunkedReader(buf, config.DefaultChunkedReadLimit, nil) + + chks := buildTestChunks(t) + l := []prompb.Label{ + {Name: "foo", Value: "bar"}, + } + + for i, c := range chks { + cSeries := prompb.ChunkedSeries{Labels: l, Chunks: []prompb.Chunk{c}} + readResp := prompb.ChunkedReadResponse{ + ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, + QueryIndex: int64(i), + } + + b, err := proto.Marshal(&readResp) + require.NoError(t, err) + + _, err = w.Write(b) + require.NoError(t, err) + } + + ss := NewChunkedSeriesSet(r, io.NopCloser(buf), 0, 14000, func(error) {}) + require.NoError(t, ss.Err()) + require.Nil(t, ss.Warnings()) + + res := ss.Next() + require.True(t, res) + require.NoError(t, ss.Err()) + + s := ss.At() + require.Equal(t, 1, s.Labels().Len()) + require.True(t, s.Labels().Has("foo")) + require.Equal(t, "bar", s.Labels().Get("foo")) + + it := s.Iterator(nil) + it.Next() + ts, v := it.At() + require.Equal(t, int64(0), ts) + require.Equal(t, float64(0), v) + + numResponses := 1 + for ss.Next() { + numResponses++ + } + require.Equal(t, numTestChunks, numResponses) + require.NoError(t, ss.Err()) + }) + + t.Run("chunked reader error", func(t *testing.T) { + buf := &bytes.Buffer{} + flusher := &mockFlusher{} + + w := NewChunkedWriter(buf, flusher) + r := NewChunkedReader(buf, config.DefaultChunkedReadLimit, nil) + + chks := buildTestChunks(t) + l := []prompb.Label{ + {Name: "foo", Value: "bar"}, + } + + for i, c := range chks { + cSeries := prompb.ChunkedSeries{Labels: l, Chunks: []prompb.Chunk{c}} + readResp := prompb.ChunkedReadResponse{ + ChunkedSeries: []*prompb.ChunkedSeries{&cSeries}, + QueryIndex: int64(i), + } + + b, err := proto.Marshal(&readResp) + require.NoError(t, err) + + b[0] = 0xFF // Corruption! 
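+ // Overwriting the first byte gives the protobuf field header wire type
+ // 7 (0xFF & 0x07), which does not exist; decoding fails with the
+ // "illegal wireType 7" error asserted below.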
+ + _, err = w.Write(b) + require.NoError(t, err) + } + + ss := NewChunkedSeriesSet(r, io.NopCloser(buf), 0, 14000, func(error) {}) + require.NoError(t, ss.Err()) + require.Nil(t, ss.Warnings()) + + res := ss.Next() + require.False(t, res) + require.ErrorContains(t, ss.Err(), "proto: illegal wireType 7") + }) +} + +// mockFlusher implements http.Flusher. +type mockFlusher struct{} + +func (f *mockFlusher) Flush() {} + +const ( + numTestChunks = 3 + numSamplesPerTestChunk = 5 +) + +func buildTestChunks(t *testing.T) []prompb.Chunk { + startTime := int64(0) + chks := make([]prompb.Chunk, 0, numTestChunks) + + time := startTime + + for i := 0; i < numTestChunks; i++ { + c := chunkenc.NewXORChunk() + + a, err := c.Appender() + require.NoError(t, err) + + minTimeMs := time + + for j := 0; j < numSamplesPerTestChunk; j++ { + a.Append(time, float64(i+j)) + time += int64(1000) + } + + chks = append(chks, prompb.Chunk{ + MinTimeMs: minTimeMs, + MaxTimeMs: time, + Type: prompb.Chunk_XOR, + Data: c.Bytes(), + }) + } + + return chks +} diff --git a/storage/remote/otlptranslator/prometheus/normalize_label.go b/storage/remote/otlptranslator/prometheus/normalize_label.go index 6360aa976..a112b9bbc 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -21,15 +21,14 @@ import ( "unicode" ) -// Normalizes the specified label to follow Prometheus label names standard +// Normalizes the specified label to follow Prometheus label names standard. // -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels +// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels. // -// Labels that start with non-letter rune will be prefixed with "key_" +// Labels that start with non-letter rune will be prefixed with "key_". // -// Exception is made for double-underscores which are allowed +// An exception is made for double-underscores which are allowed. func NormalizeLabel(label string) string { - // Trivial case if len(label) == 0 { return label @@ -48,7 +47,7 @@ func NormalizeLabel(label string) string { return label } -// Return '_' for anything non-alphanumeric +// Return '_' for anything non-alphanumeric. func sanitizeRune(r rune) rune { if unicode.IsLetter(r) || unicode.IsDigit(r) { return r diff --git a/storage/remote/otlptranslator/prometheus/normalize_name.go b/storage/remote/otlptranslator/prometheus/normalize_name.go index 71bba40e4..0f472b80a 100644 --- a/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -76,14 +76,15 @@ var perUnitMap = map[string]string{ "y": "year", } -// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric +// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric. // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. Make sure specified namespace follows Prometheus // naming convention. 
// -// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels -// and https://prometheus.io/docs/practices/naming/#metric-and-label-naming +// See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels, +// https://prometheus.io/docs/practices/naming/#metric-and-label-naming +// and https://github.com/open-telemetry/opentelemetry-specification/blob/v1.33.0/specification/compatibility/prometheus_and_openmetrics.md#otlp-metric-points-to-prometheus. func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { var metricName string @@ -110,7 +111,7 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix // Build a normalized name for the specified metric func normalizeName(metric pmetric.Metric, namespace string) string { - // Split metric name in "tokens" (remove all non-alphanumeric) + // Split metric name into "tokens" (remove all non-alphanumerics) nameTokens := strings.FieldsFunc( metric.Name(), func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }, @@ -122,9 +123,9 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Main unit // Append if not blank, doesn't contain '{}', and is not present in metric name already if len(unitTokens) > 0 { - mainUnitOtel := strings.TrimSpace(unitTokens[0]) - if mainUnitOtel != "" && !strings.ContainsAny(mainUnitOtel, "{}") { - mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOtel)) + mainUnitOTel := strings.TrimSpace(unitTokens[0]) + if mainUnitOTel != "" && !strings.ContainsAny(mainUnitOTel, "{}") { + mainUnitProm := CleanUpString(unitMapGetOrDefault(mainUnitOTel)) if mainUnitProm != "" && !contains(nameTokens, mainUnitProm) { nameTokens = append(nameTokens, mainUnitProm) } @@ -133,11 +134,11 @@ func normalizeName(metric pmetric.Metric, namespace string) string { // Per unit // Append if not blank, doesn't contain '{}', and is not present in metric name already if len(unitTokens) > 1 && unitTokens[1] != "" { - perUnitOtel := strings.TrimSpace(unitTokens[1]) - if perUnitOtel != "" && !strings.ContainsAny(perUnitOtel, "{}") { - perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOtel)) + perUnitOTel := strings.TrimSpace(unitTokens[1]) + if perUnitOTel != "" && !strings.ContainsAny(perUnitOTel, "{}") { + perUnitProm := CleanUpString(perUnitMapGetOrDefault(perUnitOTel)) if perUnitProm != "" && !contains(nameTokens, perUnitProm) { - nameTokens = append(append(nameTokens, "per"), perUnitProm) + nameTokens = append(nameTokens, "per", perUnitProm) } } } @@ -150,7 +151,7 @@ func normalizeName(metric pmetric.Metric, namespace string) string { } // Append _ratio for metrics with unit "1" - // Some Otel receivers improperly use unit "1" for counters of objects + // Some OTel receivers improperly use unit "1" for counters of objects // See https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+some+metric+units+don%27t+follow+otel+semantic+conventions // Until these issues have been fixed, we're appending `_ratio` for gauges ONLY // Theoretically, counters could be ratios as well, but it's absurd (for mathematical reasons) diff --git a/storage/remote/otlptranslator/prometheus/normalize_name_test.go b/storage/remote/otlptranslator/prometheus/normalize_name_test.go new file mode 100644 index 000000000..07b9b0a78 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/normalize_name_test.go @@ -0,0 +1,205 @@ +// Copyright 2024 The Prometheus Authors +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/normalize_name_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. + +package prometheus + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestByte(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("system.filesystem.usage", "By"), "")) +} + +func TestByteCounter(t *testing.T) { + require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) + require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) +} + +func TestWhiteSpaces(t *testing.T) { + require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) +} + +func TestNonStandardUnit(t *testing.T) { + require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) +} + +func TestNonStandardUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) +} + +func TestBrokenUnit(t *testing.T) { + require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) +} + +func TestBrokenUnitCounter(t *testing.T) { + require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) + require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) +} + +func TestRatio(t *testing.T) { + require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) + require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) + require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), "")) +} + +func TestHertz(t *testing.T) { + require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) +} + +func TestPer(t *testing.T) { + require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) + require.Equal(t, "astro_light_speed_limit_meters_per_second", 
normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) +} + +func TestPercent(t *testing.T) { + require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) + require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) +} + +func TestEmpty(t *testing.T) { + require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "")) + require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) +} + +func TestUnsupportedRunes(t *testing.T) { + require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) + require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) + require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) +} + +func TestOTelReceivers(t *testing.T) { + require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) + require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) + require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) + require.Equal(t, "active_directory_ds_name_cache_hit_rate_percent", normalizeName(createGauge("active_directory.ds.name_cache.hit_rate", "%"), "")) + require.Equal(t, "active_directory_ds_ldap_bind_last_successful_time_milliseconds", normalizeName(createGauge("active_directory.ds.ldap.bind.last_successful.time", "ms"), "")) + require.Equal(t, "apache_current_connections", normalizeName(createGauge("apache.current_connections", "connections"), "")) + require.Equal(t, "apache_workers_connections", normalizeName(createGauge("apache.workers", "connections"), "")) + require.Equal(t, "apache_requests_total", normalizeName(createCounter("apache.requests", "1"), "")) + require.Equal(t, "bigip_virtual_server_request_count_total", normalizeName(createCounter("bigip.virtual_server.request.count", "{requests}"), "")) + require.Equal(t, "system_cpu_utilization_ratio", normalizeName(createGauge("system.cpu.utilization", "1"), "")) + require.Equal(t, "system_disk_operation_time_seconds_total", normalizeName(createCounter("system.disk.operation_time", "s"), "")) + require.Equal(t, "system_cpu_load_average_15m_ratio", normalizeName(createGauge("system.cpu.load_average.15m", "1"), "")) + require.Equal(t, "memcached_operation_hit_ratio_percent", normalizeName(createGauge("memcached.operation_hit_ratio", "%"), "")) + require.Equal(t, "mongodbatlas_process_asserts_per_second", normalizeName(createGauge("mongodbatlas.process.asserts", "{assertions}/s"), "")) + require.Equal(t, "mongodbatlas_process_journaling_data_files_mebibytes", normalizeName(createGauge("mongodbatlas.process.journaling.data_files", "MiBy"), "")) + require.Equal(t, "mongodbatlas_process_network_io_bytes_per_second", normalizeName(createGauge("mongodbatlas.process.network.io", "By/s"), "")) + require.Equal(t, "mongodbatlas_process_oplog_rate_gibibytes_per_hour", normalizeName(createGauge("mongodbatlas.process.oplog.rate", "GiBy/h"), "")) + require.Equal(t, 
"mongodbatlas_process_db_query_targeting_scanned_per_returned", normalizeName(createGauge("mongodbatlas.process.db.query_targeting.scanned_per_returned", "{scanned}/{returned}"), "")) + require.Equal(t, "nginx_requests", normalizeName(createGauge("nginx.requests", "requests"), "")) + require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) + require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) + require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) +} + +func TestTrimPromSuffixes(t *testing.T) { + assert.Equal(t, "active_directory_ds_replication_network_io", TrimPromSuffixes("active_directory_ds_replication_network_io_bytes_total", pmetric.MetricTypeSum, "bytes")) + assert.Equal(t, "active_directory_ds_name_cache_hit_rate", TrimPromSuffixes("active_directory_ds_name_cache_hit_rate_percent", pmetric.MetricTypeGauge, "percent")) + assert.Equal(t, "active_directory_ds_ldap_bind_last_successful_time", TrimPromSuffixes("active_directory_ds_ldap_bind_last_successful_time_milliseconds", pmetric.MetricTypeGauge, "milliseconds")) + assert.Equal(t, "apache_requests", TrimPromSuffixes("apache_requests_total", pmetric.MetricTypeSum, "1")) + assert.Equal(t, "system_cpu_utilization", TrimPromSuffixes("system_cpu_utilization_ratio", pmetric.MetricTypeGauge, "ratio")) + assert.Equal(t, "mongodbatlas_process_journaling_data_files", TrimPromSuffixes("mongodbatlas_process_journaling_data_files_mebibytes", pmetric.MetricTypeGauge, "mebibytes")) + assert.Equal(t, "mongodbatlas_process_network_io", TrimPromSuffixes("mongodbatlas_process_network_io_bytes_per_second", pmetric.MetricTypeGauge, "bytes_per_second")) + assert.Equal(t, "mongodbatlas_process_oplog_rate", TrimPromSuffixes("mongodbatlas_process_oplog_rate_gibibytes_per_hour", pmetric.MetricTypeGauge, "gibibytes_per_hour")) + assert.Equal(t, "nsxt_node_memory_usage", TrimPromSuffixes("nsxt_node_memory_usage_kilobytes", pmetric.MetricTypeGauge, "kilobytes")) + assert.Equal(t, "redis_latest_fork", TrimPromSuffixes("redis_latest_fork_microseconds", pmetric.MetricTypeGauge, "microseconds")) + assert.Equal(t, "up", TrimPromSuffixes("up", pmetric.MetricTypeGauge, "")) + + // These are not necessarily valid OM units, only tested for the sake of completeness. 
+ assert.Equal(t, "active_directory_ds_replication_sync_object_pending", TrimPromSuffixes("active_directory_ds_replication_sync_object_pending_total", pmetric.MetricTypeSum, "{objects}")) + assert.Equal(t, "apache_current", TrimPromSuffixes("apache_current_connections", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "bigip_virtual_server_request_count", TrimPromSuffixes("bigip_virtual_server_request_count_total", pmetric.MetricTypeSum, "{requests}")) + assert.Equal(t, "mongodbatlas_process_db_query_targeting_scanned_per_returned", TrimPromSuffixes("mongodbatlas_process_db_query_targeting_scanned_per_returned", pmetric.MetricTypeGauge, "{scanned}/{returned}")) + assert.Equal(t, "nginx_connections_accepted", TrimPromSuffixes("nginx_connections_accepted", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "apache_workers", TrimPromSuffixes("apache_workers_connections", pmetric.MetricTypeGauge, "connections")) + assert.Equal(t, "nginx", TrimPromSuffixes("nginx_requests", pmetric.MetricTypeGauge, "requests")) + + // Units shouldn't be trimmed if the unit is not a direct match with the suffix, i.e., a suffix "_seconds" shouldn't be removed if the unit is "sec" or "s". + assert.Equal(t, "system_cpu_load_average_15m_ratio", TrimPromSuffixes("system_cpu_load_average_15m_ratio", pmetric.MetricTypeGauge, "1")) + assert.Equal(t, "mongodbatlas_process_asserts_per_second", TrimPromSuffixes("mongodbatlas_process_asserts_per_second", pmetric.MetricTypeGauge, "{assertions}/s")) + assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%")) + assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s")) + assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s")) +} + +func TestNamespace(t *testing.T) { + require.Equal(t, "space_test", normalizeName(createGauge("test", ""), "space")) + require.Equal(t, "space_test", normalizeName(createGauge("#test", ""), "space")) +} + +func TestCleanUpString(t *testing.T) { + require.Equal(t, "", CleanUpString("")) + require.Equal(t, "a_b", CleanUpString("a b")) + require.Equal(t, "hello_world", CleanUpString("hello, world!")) + require.Equal(t, "hello_you_2", CleanUpString("hello you 2")) + require.Equal(t, "1000", CleanUpString("$1000")) + require.Equal(t, "", CleanUpString("*+$^=)")) +} + +func TestUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", unitMapGetOrDefault("")) + require.Equal(t, "seconds", unitMapGetOrDefault("s")) + require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) +} + +func TestPerUnitMapGetOrDefault(t *testing.T) { + require.Equal(t, "", perUnitMapGetOrDefault("")) + require.Equal(t, "second", perUnitMapGetOrDefault("s")) + require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) +} + +func TestRemoveItem(t *testing.T) { + require.Equal(t, []string{}, removeItem([]string{}, "test")) + require.Equal(t, []string{}, removeItem([]string{}, "")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) + require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "")) + require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) + require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) + require.Equal(t, []string{"b", "c"},
removeItem([]string{"a", "b", "c"}, "a")) +} + +func TestBuildCompliantNameWithNormalize(t *testing.T) { + require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", true)) + require.Equal(t, "system_network_io_bytes_total", BuildCompliantName(createCounter("network.io", "By"), "system", true)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", ""), "", true)) + require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", true)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", true)) + require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", true)) + // Gauges with unit 1 are considered ratios. + require.Equal(t, "foo_bar_ratio", BuildCompliantName(createGauge("foo.bar", "1"), "", true)) + // Slashes in units are converted. + require.Equal(t, "system_io_foo_per_bar_total", BuildCompliantName(createCounter("system.io", "foo/bar"), "", true)) +} + +func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", false)) + require.Equal(t, "system_network_io", BuildCompliantName(createCounter("network.io", "By"), "system", false)) + require.Equal(t, "system_network_I_O", BuildCompliantName(createCounter("network (I/O)", "By"), "system", false)) + require.Equal(t, "_3_14_digits", BuildCompliantName(createGauge("3.14 digits", "By"), "", false)) + require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", false)) + require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", false)) + require.Equal(t, "foo_bar", BuildCompliantName(createGauge("foo.bar", "1"), "", false)) + require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "foo/bar"), "", false)) +} diff --git a/storage/remote/otlptranslator/prometheus/testutils_test.go b/storage/remote/otlptranslator/prometheus/testutils_test.go new file mode 100644 index 000000000..363328c57 --- /dev/null +++ b/storage/remote/otlptranslator/prometheus/testutils_test.go @@ -0,0 +1,49 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/95e8f8fdc2a9dc87230406c9a3cf02be4fd68bea/pkg/translator/prometheus/testutils_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
+ +package prometheus + +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var ilm pmetric.ScopeMetrics + +func init() { + metrics := pmetric.NewMetrics() + resourceMetrics := metrics.ResourceMetrics().AppendEmpty() + ilm = resourceMetrics.ScopeMetrics().AppendEmpty() +} + +// Returns a new Metric of type "Gauge" with the specified name and unit. +func createGauge(name string, unit string) pmetric.Metric { + gauge := ilm.Metrics().AppendEmpty() + gauge.SetName(name) + gauge.SetUnit(unit) + gauge.SetEmptyGauge() + return gauge +} + +// Returns a new Metric of type Monotonic Sum with the specified name and unit. +func createCounter(name string, unit string) pmetric.Metric { + counter := ilm.Metrics().AppendEmpty() + counter.SetEmptySum().SetIsMonotonic(true) + counter.SetName(name) + counter.SetUnit(unit) + return counter +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go index f2d7ecd4e..67cf28119 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -24,7 +24,6 @@ import ( "slices" "sort" "strconv" - "time" "unicode/utf8" "github.com/cespare/xxhash/v2" @@ -594,5 +593,5 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms func convertTimeStamp(timestamp pcommon.Timestamp) int64 { - return timestamp.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) + // pcommon.Timestamp counts nanoseconds since the Unix epoch, so integer division by 1_000_000 converts to milliseconds (truncating). + return int64(timestamp) / 1_000_000 } diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go index c4dd781ae..e02ebbf5d 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/helper_test.go @@ -10,13 +10,21 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/debbf30360b8d3a0ded8db09c4419d2a9c99b94a/pkg/translator/prometheusremotewrite/helper_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors.
+ package prometheusremotewrite import ( "testing" + "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/prompb" ) @@ -159,3 +167,239 @@ func TestCreateAttributes(t *testing.T) { }) } } + +func Test_convertTimeStamp(t *testing.T) { + tests := []struct { + name string + arg pcommon.Timestamp + want int64 + }{ + {"zero", 0, 0}, + {"1ms", 1_000_000, 1}, + {"1s", pcommon.Timestamp(time.Unix(1, 0).UnixNano()), 1000}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertTimeStamp(tt.arg) + assert.Equal(t, tt.want, got) + }) + } +} + +func TestPrometheusConverter_AddSummaryDataPoints(t *testing.T) { + ts := pcommon.Timestamp(time.Now().UnixNano()) + tests := []struct { + name string + metric func() pmetric.Metric + want func() map[uint64]*prompb.TimeSeries + }{ + { + name: "summary with start time", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_summary") + metric.SetEmptySummary() + + dp := metric.Summary().DataPoints().AppendEmpty() + dp.SetTimestamp(ts) + dp.SetStartTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, + } + createdLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_summary" + createdSuffix}, + } + sumLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(sumLabels): { + Labels: sumLabels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(createdLabels): { + Labels: createdLabels, + Samples: []prompb.Sample{ + {Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + { + name: "summary without start time", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_summary") + metric.SetEmptySummary() + + dp := metric.Summary().DataPoints().AppendEmpty() + dp.SetTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_summary" + countStr}, + } + sumLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_summary" + sumStr}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(sumLabels): { + Labels: sumLabels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := tt.metric() + converter := NewPrometheusConverter() + + converter.addSummaryDataPoints( + metric.Summary().DataPoints(), + pcommon.NewResource(), + Settings{ + ExportCreatedMetric: true, + }, + metric.Name(), + ) + + assert.Equal(t, tt.want(), converter.unique) + assert.Empty(t, converter.conflicts) + }) + } +} + +func TestPrometheusConverter_AddHistogramDataPoints(t *testing.T) { + ts := pcommon.Timestamp(time.Now().UnixNano()) + tests := []struct { + name string + metric func() pmetric.Metric + want 
func() map[uint64]*prompb.TimeSeries + }{ + { + name: "histogram with start time", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_hist") + metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + pt := metric.Histogram().DataPoints().AppendEmpty() + pt.SetTimestamp(ts) + pt.SetStartTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, + } + createdLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist" + createdSuffix}, + } + infLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, + {Name: model.BucketLabel, Value: "+Inf"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(infLabels): { + Labels: infLabels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(createdLabels): { + Labels: createdLabels, + Samples: []prompb.Sample{ + {Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + { + name: "histogram without start time", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_hist") + metric.SetEmptyHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + pt := metric.Histogram().DataPoints().AppendEmpty() + pt.SetTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist" + countStr}, + } + infLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist_bucket"}, + {Name: model.BucketLabel, Value: "+Inf"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(infLabels): { + Labels: infLabels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := tt.metric() + converter := NewPrometheusConverter() + + converter.addHistogramDataPoints( + metric.Histogram().DataPoints(), + pcommon.NewResource(), + Settings{ + ExportCreatedMetric: true, + }, + metric.Name(), + ) + + assert.Equal(t, tt.want(), converter.unique) + assert.Empty(t, converter.conflicts) + }) + } +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 73528019d..ec93387fc 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/util/annotations" ) const defaultZeroThreshold = 1e-128 @@ -33,13 +34,15 @@ const defaultZeroThreshold = 1e-128 // addExponentialHistogramDataPoints adds OTel exponential histogram data points to the corresponding time series // as native histogram samples. 
func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetric.ExponentialHistogramDataPointSlice, - resource pcommon.Resource, settings Settings, promName string) error { + resource pcommon.Resource, settings Settings, promName string) (annotations.Annotations, error) { + var annots annotations.Annotations for x := 0; x < dataPoints.Len(); x++ { pt := dataPoints.At(x) - histogram, err := exponentialToNativeHistogram(pt) + histogram, ws, err := exponentialToNativeHistogram(pt) + annots.Merge(ws) if err != nil { - return err + return annots, err } lbls := createAttributes( @@ -58,15 +61,16 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(dataPoints pmetr ts.Exemplars = append(ts.Exemplars, exemplars...) } - return nil + return annots, nil } // exponentialToNativeHistogram translates OTel Exponential Histogram data point // to Prometheus Native Histogram. -func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) { +func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { + var annots annotations.Annotations scale := p.Scale() if scale < -4 { - return prompb.Histogram{}, + return prompb.Histogram{}, annots, fmt.Errorf("cannot convert exponential to native histogram."+ " Scale must be >= -4, was %d", scale) } @@ -114,8 +118,11 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom h.Sum = p.Sum() } h.Count = &prompb.Histogram_CountInt{CountInt: p.Count()} + if p.Count() == 0 && h.Sum != 0 { + annots.Add(fmt.Errorf("exponential histogram data point has zero count, but non-zero sum: %f", h.Sum)) + } } - return h, nil + return h, annots, nil } // convertBucketsLayout translates OTel Exponential Histogram dense buckets diff --git a/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go new file mode 100644 index 000000000..cd1c858ac --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/histograms_test.go @@ -0,0 +1,771 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/histograms_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. 
+ +package prometheusremotewrite + +import ( + "fmt" + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" +) + +type expectedBucketLayout struct { + wantSpans []prompb.BucketSpan + wantDeltas []int64 +} + +func TestConvertBucketsLayout(t *testing.T) { + tests := []struct { + name string + buckets func() pmetric.ExponentialHistogramDataPointBuckets + wantLayout map[int32]expectedBucketLayout + }{ + { + name: "zero offset", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(0) + b.BucketCounts().FromRaw([]uint64{4, 3, 2, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 1, + Length: 4, + }, + }, + wantDeltas: []int64{4, -1, -1, -1}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 1, + Length: 2, + }, + }, + // 4+3, 2+1 = 7, 3 =delta= 7, -4 + wantDeltas: []int64{7, -4}, + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 1, + Length: 1, + }, + }, + // 4+3+2+1 = 10 =delta= 10 + wantDeltas: []int64{10}, + }, + }, + }, + { + name: "offset 1", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(1) + b.BucketCounts().FromRaw([]uint64{4, 3, 2, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 2, + Length: 4, + }, + }, + wantDeltas: []int64{4, -1, -1, -1}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 1, + Length: 3, + }, + }, + wantDeltas: []int64{4, 1, -4}, // 0+4, 3+2, 1+0 = 4, 5, 1 + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 1, + Length: 2, + }, + }, + wantDeltas: []int64{9, -8}, // 0+4+3+2, 1+0+0+0 = 9, 1 + }, + }, + }, + { + name: "positive offset", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(4) + b.BucketCounts().FromRaw([]uint64{4, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 5, + Length: 4, + }, + { + Offset: 12, + Length: 1, + }, + }, + wantDeltas: []int64{4, -2, -2, 2, -1}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 3, + Length: 2, + }, + { + Offset: 6, + Length: 1, + }, + }, + // Downscale: + // 4+2, 0+2, 0+0, 0+0, 0+0, 0+0, 0+0, 0+0, 1+0 = 6, 2, 0, 0, 0, 0, 0, 0, 1 + wantDeltas: []int64{6, -4, -1}, + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 2, + Length: 1, + }, + { + Offset: 3, + Length: 1, + }, + }, + // Downscale: + // 4+2+0+2, 0+0+0+0, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 0, 1 + // Check by scaling from previous: 6+2, 0+0, 0+0, 0+0, 1+0 = 8, 0, 0, 0, 1 + wantDeltas: []int64{8, -7}, + }, + }, + }, + { + name: "scaledown merges spans", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(4) + b.BucketCounts().FromRaw([]uint64{4, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 5,
+ Length: 4, + }, + { + Offset: 8, + Length: 1, + }, + }, + wantDeltas: []int64{4, -2, -2, 2, -1}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 3, + Length: 2, + }, + { + Offset: 4, + Length: 1, + }, + }, + // Downscale: + // 4+2, 0+2, 0+0, 0+0, 0+0, 0+0, 1+0 = 6, 2, 0, 0, 0, 0, 1 + wantDeltas: []int64{6, -4, -1}, + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 2, + Length: 4, + }, + }, + // Downscale: + // 4+2+0+2, 0+0+0+0, 0+0+0+0, 1+0+0+0 = 8, 0, 0, 1 + // Check by scaling from previous: 6+2, 0+0, 0+0, 1+0 = 8, 0, 0, 1 + wantDeltas: []int64{8, -8, 0, 1}, + }, + }, + }, + { + name: "negative offset", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(-2) + b.BucketCounts().FromRaw([]uint64{3, 1, 0, 0, 0, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: -1, + Length: 2, + }, + { + Offset: 3, + Length: 1, + }, + }, + wantDeltas: []int64{3, -2, 0}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 3, + }, + }, + // Downscale: + // 3+1, 0+0, 0+1 = 4, 0, 1 + wantDeltas: []int64{4, -4, 1}, + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 2, + }, + }, + // Downscale: + // 0+0+3+1, 0+0+0+0 = 4, 1 + wantDeltas: []int64{4, -3}, + }, + }, + }, + { + name: "buckets with gaps of size 1", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(-2) + b.BucketCounts().FromRaw([]uint64{3, 1, 0, 1, 0, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: -1, + Length: 6, + }, + }, + wantDeltas: []int64{3, -2, -1, 1, -1, 1}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 3, + }, + }, + // Downscale: + // 3+1, 0+1, 0+1 = 4, 1, 1 + wantDeltas: []int64{4, -3, 0}, + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 2, + }, + }, + // Downscale: + // 0+0+3+1, 0+1+0+1 = 4, 2 + wantDeltas: []int64{4, -2}, + }, + }, + }, + { + name: "buckets with gaps of size 2", + buckets: func() pmetric.ExponentialHistogramDataPointBuckets { + b := pmetric.NewExponentialHistogramDataPointBuckets() + b.SetOffset(-2) + b.BucketCounts().FromRaw([]uint64{3, 0, 0, 1, 0, 0, 1}) + return b + }, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: []prompb.BucketSpan{ + { + Offset: -1, + Length: 7, + }, + }, + wantDeltas: []int64{3, -3, 0, 1, -1, 0, 1}, + }, + 1: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 4, + }, + }, + // Downscale: + // 3+0, 0+1, 0+0, 0+1 = 3, 1, 0, 1 + wantDeltas: []int64{3, -2, -1, 1}, + }, + 2: { + wantSpans: []prompb.BucketSpan{ + { + Offset: 0, + Length: 3, + }, + }, + // Downscale: + // 0+0+3+0, 0+1+0+0, 1+0+0+0 = 3, 1, 1 + wantDeltas: []int64{3, -2, 0}, + }, + }, + }, + { + name: "zero buckets", + buckets: pmetric.NewExponentialHistogramDataPointBuckets, + wantLayout: map[int32]expectedBucketLayout{ + 0: { + wantSpans: nil, + wantDeltas: nil, + }, + 1: { + wantSpans: nil, + wantDeltas: nil, + }, + 2: { + wantSpans: nil, + wantDeltas: nil, + }, + }, + }, + } + for _, tt := range tests { + for scaleDown, wantLayout := range tt.wantLayout { + t.Run(fmt.Sprintf("%s-scaleby-%d", tt.name, scaleDown), func(t *testing.T) { + gotSpans, gotDeltas := convertBucketsLayout(tt.buckets(), scaleDown) + assert.Equal(t, wantLayout.wantSpans, gotSpans) + assert.Equal(t,
wantLayout.wantDeltas, gotDeltas) + }) + } + } +} + +func BenchmarkConvertBucketLayout(b *testing.B) { + scenarios := []struct { + gap int + }{ + {gap: 0}, + {gap: 1}, + {gap: 2}, + {gap: 3}, + } + + for _, scenario := range scenarios { + buckets := pmetric.NewExponentialHistogramDataPointBuckets() + buckets.SetOffset(0) + for i := 0; i < 1000; i++ { + if i%(scenario.gap+1) == 0 { + buckets.BucketCounts().Append(10) + } else { + buckets.BucketCounts().Append(0) + } + } + b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) { + for i := 0; i < b.N; i++ { + convertBucketsLayout(buckets, 0) + } + }) + } +} + +func TestExponentialToNativeHistogram(t *testing.T) { + tests := []struct { + name string + exponentialHist func() pmetric.ExponentialHistogramDataPoint + wantNativeHist func() prompb.Histogram + wantErrMessage string + }{ + { + name: "convert exp. to native histogram", + exponentialHist: func() pmetric.ExponentialHistogramDataPoint { + pt := pmetric.NewExponentialHistogramDataPoint() + pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) + pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) + pt.SetCount(4) + pt.SetSum(10.1) + pt.SetScale(1) + pt.SetZeroCount(1) + + pt.Positive().BucketCounts().FromRaw([]uint64{1, 1}) + pt.Positive().SetOffset(1) + + pt.Negative().BucketCounts().FromRaw([]uint64{1, 1}) + pt.Negative().SetOffset(1) + + return pt + }, + wantNativeHist: func() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 4}, + Sum: 10.1, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, + NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, + NegativeDeltas: []int64{1, 0}, + PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, + PositiveDeltas: []int64{1, 0}, + Timestamp: 500, + } + }, + }, + { + name: "convert exp. to native histogram with no sum", + exponentialHist: func() pmetric.ExponentialHistogramDataPoint { + pt := pmetric.NewExponentialHistogramDataPoint() + pt.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) + pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) + + pt.SetCount(4) + pt.SetScale(1) + pt.SetZeroCount(1) + + pt.Positive().BucketCounts().FromRaw([]uint64{1, 1}) + pt.Positive().SetOffset(1) + + pt.Negative().BucketCounts().FromRaw([]uint64{1, 1}) + pt.Negative().SetOffset(1) + + return pt + }, + wantNativeHist: func() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 4}, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, + NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, + NegativeDeltas: []int64{1, 0}, + PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, + PositiveDeltas: []int64{1, 0}, + Timestamp: 500, + } + }, + }, + { + name: "invalid negative scale", + exponentialHist: func() pmetric.ExponentialHistogramDataPoint { + pt := pmetric.NewExponentialHistogramDataPoint() + pt.SetScale(-10) + return pt + }, + wantErrMessage: "cannot convert exponential to native histogram." 
+ + " Scale must be >= -4, was -10", + }, + { + name: "no downscaling at scale 8", + exponentialHist: func() pmetric.ExponentialHistogramDataPoint { + pt := pmetric.NewExponentialHistogramDataPoint() + pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) + pt.SetCount(6) + pt.SetSum(10.1) + pt.SetScale(8) + pt.SetZeroCount(1) + + pt.Positive().BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt.Positive().SetOffset(1) + + pt.Negative().BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt.Negative().SetOffset(2) + return pt + }, + wantNativeHist: func() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 6}, + Sum: 10.1, + Schema: 8, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, + PositiveSpans: []prompb.BucketSpan{{Offset: 2, Length: 3}}, + PositiveDeltas: []int64{1, 0, 0}, // 1, 1, 1 + NegativeSpans: []prompb.BucketSpan{{Offset: 3, Length: 3}}, + NegativeDeltas: []int64{1, 0, 0}, // 1, 1, 1 + Timestamp: 500, + } + }, + }, + { + name: "downsample if scale is more than 8", + exponentialHist: func() pmetric.ExponentialHistogramDataPoint { + pt := pmetric.NewExponentialHistogramDataPoint() + pt.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) + pt.SetCount(6) + pt.SetSum(10.1) + pt.SetScale(9) + pt.SetZeroCount(1) + + pt.Positive().BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt.Positive().SetOffset(1) + + pt.Negative().BucketCounts().FromRaw([]uint64{1, 1, 1}) + pt.Negative().SetOffset(2) + return pt + }, + wantNativeHist: func() prompb.Histogram { + return prompb.Histogram{ + Count: &prompb.Histogram_CountInt{CountInt: 6}, + Sum: 10.1, + Schema: 8, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 1}, + PositiveSpans: []prompb.BucketSpan{{Offset: 1, Length: 2}}, + PositiveDeltas: []int64{1, 1}, // 0+1, 1+1 = 1, 2 + NegativeSpans: []prompb.BucketSpan{{Offset: 2, Length: 2}}, + NegativeDeltas: []int64{2, -1}, // 1+1, 1+0 = 2, 1 + Timestamp: 500, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + validateExponentialHistogramCount(t, tt.exponentialHist()) // Sanity check. 
+ got, annots, err := exponentialToNativeHistogram(tt.exponentialHist()) + if tt.wantErrMessage != "" { + assert.ErrorContains(t, err, tt.wantErrMessage) + return + } + + require.NoError(t, err) + require.Empty(t, annots) + assert.Equal(t, tt.wantNativeHist(), got) + validateNativeHistogramCount(t, got) + }) + } +} + +func validateExponentialHistogramCount(t *testing.T, h pmetric.ExponentialHistogramDataPoint) { + actualCount := uint64(0) + for _, bucket := range h.Positive().BucketCounts().AsRaw() { + actualCount += bucket + } + for _, bucket := range h.Negative().BucketCounts().AsRaw() { + actualCount += bucket + } + require.Equal(t, h.Count(), actualCount, "exponential histogram count mismatch") +} + +func validateNativeHistogramCount(t *testing.T, h prompb.Histogram) { + require.NotNil(t, h.Count) + require.IsType(t, &prompb.Histogram_CountInt{}, h.Count) + want := h.Count.(*prompb.Histogram_CountInt).CountInt + var ( + actualCount uint64 + prevBucket int64 + ) + for _, delta := range h.PositiveDeltas { + prevBucket += delta + actualCount += uint64(prevBucket) + } + prevBucket = 0 + for _, delta := range h.NegativeDeltas { + prevBucket += delta + actualCount += uint64(prevBucket) + } + assert.Equal(t, want, actualCount, "native histogram count mismatch") +} + +func TestPrometheusConverter_addExponentialHistogramDataPoints(t *testing.T) { + tests := []struct { + name string + metric func() pmetric.Metric + wantSeries func() map[uint64]*prompb.TimeSeries + }{ + { + name: "histogram data points with same labels", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_hist") + metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + pt := metric.ExponentialHistogram().DataPoints().AppendEmpty() + pt.SetCount(7) + pt.SetScale(1) + pt.Positive().SetOffset(-1) + pt.Positive().BucketCounts().FromRaw([]uint64{4, 2}) + pt.Exemplars().AppendEmpty().SetDoubleValue(1) + pt.Attributes().PutStr("attr", "test_attr") + + pt = metric.ExponentialHistogram().DataPoints().AppendEmpty() + pt.SetCount(4) + pt.SetScale(1) + pt.Positive().SetOffset(-1) + pt.Positive().BucketCounts().FromRaw([]uint64{4, 2, 1}) + pt.Exemplars().AppendEmpty().SetDoubleValue(2) + pt.Attributes().PutStr("attr", "test_attr") + + return metric + }, + wantSeries: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist"}, + {Name: "attr", Value: "test_attr"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 7}, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, + PositiveDeltas: []int64{4, -2}, + }, + { + Count: &prompb.Histogram_CountInt{CountInt: 4}, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, + PositiveDeltas: []int64{4, -2, -1}, + }, + }, + Exemplars: []prompb.Exemplar{ + {Value: 1}, + {Value: 2}, + }, + }, + } + }, + }, + { + name: "histogram data points with different labels", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_hist") + metric.SetEmptyExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + + pt := 
metric.ExponentialHistogram().DataPoints().AppendEmpty() + pt.SetCount(7) + pt.SetScale(1) + pt.Positive().SetOffset(-1) + pt.Positive().BucketCounts().FromRaw([]uint64{4, 2}) + pt.Exemplars().AppendEmpty().SetDoubleValue(1) + pt.Attributes().PutStr("attr", "test_attr") + + pt = metric.ExponentialHistogram().DataPoints().AppendEmpty() + pt.SetCount(4) + pt.SetScale(1) + pt.Negative().SetOffset(-1) + pt.Negative().BucketCounts().FromRaw([]uint64{4, 2, 1}) + pt.Exemplars().AppendEmpty().SetDoubleValue(2) + pt.Attributes().PutStr("attr", "test_attr_two") + + return metric + }, + wantSeries: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist"}, + {Name: "attr", Value: "test_attr"}, + } + labelsAnother := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_hist"}, + {Name: "attr", Value: "test_attr_two"}, + } + + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 7}, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + PositiveSpans: []prompb.BucketSpan{{Offset: 0, Length: 2}}, + PositiveDeltas: []int64{4, -2}, + }, + }, + Exemplars: []prompb.Exemplar{ + {Value: 1}, + }, + }, + timeSeriesSignature(labelsAnother): { + Labels: labelsAnother, + Histograms: []prompb.Histogram{ + { + Count: &prompb.Histogram_CountInt{CountInt: 4}, + Schema: 1, + ZeroThreshold: defaultZeroThreshold, + ZeroCount: &prompb.Histogram_ZeroCountInt{ZeroCountInt: 0}, + NegativeSpans: []prompb.BucketSpan{{Offset: 0, Length: 3}}, + NegativeDeltas: []int64{4, -2, -1}, + }, + }, + Exemplars: []prompb.Exemplar{ + {Value: 2}, + }, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := tt.metric() + + converter := NewPrometheusConverter() + annots, err := converter.addExponentialHistogramDataPoints( + metric.ExponentialHistogram().DataPoints(), + pcommon.NewResource(), + Settings{ + ExportCreatedMetric: true, + }, + prometheustranslator.BuildCompliantName(metric, "", true), + ) + require.NoError(t, err) + require.Empty(t, annots) + + assert.Equal(t, tt.wantSeries(), converter.unique) + assert.Empty(t, converter.conflicts) + }) + } +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index a3a789723..9d7680080 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/prompb" prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" + "github.com/prometheus/prometheus/util/annotations" ) type Settings struct { @@ -53,7 +54,7 @@ func NewPrometheusConverter() *PrometheusConverter { } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. 
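+// It returns any warnings encountered during translation as annotations,
+// so that callers can surface them separately from hard errors.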
-func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (errs error) { +func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) (annots annotations.Annotations, errs error) { resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { resourceMetrics := resourceMetricsSlice.At(i) @@ -107,12 +108,14 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) break } - errs = multierr.Append(errs, c.addExponentialHistogramDataPoints( + ws, err := c.addExponentialHistogramDataPoints( dataPoints, resource, settings, promName, - )) + ) + annots.Merge(ws) + errs = multierr.Append(errs, err) case pmetric.MetricTypeSummary: dataPoints := metric.Summary().DataPoints() if dataPoints.Len() == 0 { @@ -128,7 +131,7 @@ func (c *PrometheusConverter) FromMetrics(md pmetric.Metrics, settings Settings) addResourceTargetInfo(resource, settings, mostRecentTimestamp, c) } - return + return annots, errs } func isSameMetric(ts *prompb.TimeSeries, lbls []prompb.Label) bool { diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go index 37ac67774..bdc1c9d0b 100644 --- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go +++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw_test.go @@ -27,6 +27,41 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" ) +func TestFromMetrics(t *testing.T) { + t.Run("exponential histogram warnings for zero count and non-zero sum", func(t *testing.T) { + request := pmetricotlp.NewExportRequest() + rm := request.Metrics().ResourceMetrics().AppendEmpty() + generateAttributes(rm.Resource().Attributes(), "resource", 10) + + metrics := rm.ScopeMetrics().AppendEmpty().Metrics() + ts := pcommon.NewTimestampFromTime(time.Now()) + + for i := 1; i <= 10; i++ { + m := metrics.AppendEmpty() + m.SetEmptyExponentialHistogram() + m.SetName(fmt.Sprintf("histogram-%d", i)) + m.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + h := m.ExponentialHistogram().DataPoints().AppendEmpty() + h.SetTimestamp(ts) + + h.SetCount(0) + h.SetSum(155) + + generateAttributes(h.Attributes(), "series", 10) + } + + converter := NewPrometheusConverter() + annots, err := converter.FromMetrics(request.Metrics(), Settings{}) + require.NoError(t, err) + require.NotEmpty(t, annots) + ws, infos := annots.AsStrings("", 0, 0) + require.Empty(t, infos) + require.Equal(t, []string{ + "exponential histogram data point has zero count, but non-zero sum: 155.000000", + }, ws) + }) +} + func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { for _, resourceAttributeCount := range []int{0, 5, 50} { b.Run(fmt.Sprintf("resource attribute count: %v", resourceAttributeCount), func(b *testing.B) { @@ -49,7 +84,9 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) { for i := 0; i < b.N; i++ { converter := NewPrometheusConverter() - require.NoError(b, converter.FromMetrics(payload.Metrics(), Settings{})) + annots, err := converter.FromMetrics(payload.Metrics(), Settings{}) + require.NoError(b, err) + require.Empty(b, annots) require.NotNil(b, converter.TimeSeries()) } }) diff --git a/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go 
b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go new file mode 100644 index 000000000..41afc8c4c --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/number_data_points_test.go @@ -0,0 +1,258 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/number_data_points_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. + +package prometheusremotewrite + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/prompb" + "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func TestPrometheusConverter_addGaugeNumberDataPoints(t *testing.T) { + ts := uint64(time.Now().UnixNano()) + tests := []struct { + name string + metric func() pmetric.Metric + want func() map[uint64]*prompb.TimeSeries + }{ + { + name: "gauge", + metric: func() pmetric.Metric { + return getIntGaugeMetric( + "test", + pcommon.NewMap(), + 1, ts, + ) + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + { + Value: 1, + Timestamp: convertTimeStamp(pcommon.Timestamp(ts)), + }}, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := tt.metric() + converter := NewPrometheusConverter() + + converter.addGaugeNumberDataPoints( + metric.Gauge().DataPoints(), + pcommon.NewResource(), + Settings{ + ExportCreatedMetric: true, + }, + metric.Name(), + ) + + assert.Equal(t, tt.want(), converter.unique) + assert.Empty(t, converter.conflicts) + }) + } +} + +func TestPrometheusConverter_addSumNumberDataPoints(t *testing.T) { + ts := pcommon.Timestamp(time.Now().UnixNano()) + tests := []struct { + name string + metric func() pmetric.Metric + want func() map[uint64]*prompb.TimeSeries + }{ + { + name: "sum", + metric: func() pmetric.Metric { + return getIntSumMetric( + "test", + pcommon.NewMap(), + 1, + uint64(ts.AsTime().UnixNano()), + ) + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + { + Value: 1, + Timestamp: convertTimeStamp(ts), + }}, + }, + } + }, + }, + { + name: "sum with exemplars", + metric: func() pmetric.Metric { + m := getIntSumMetric( + "test", + pcommon.NewMap(), + 1, + uint64(ts.AsTime().UnixNano()), + ) + m.Sum().DataPoints().At(0).Exemplars().AppendEmpty().SetDoubleValue(2) + return m + }, + want: func() 
map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{{ + Value: 1, + Timestamp: convertTimeStamp(ts), + }}, + Exemplars: []prompb.Exemplar{ + {Value: 2}, + }, + }, + } + }, + }, + { + name: "monotonic cumulative sum with start timestamp", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_sum") + metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + metric.SetEmptySum().SetIsMonotonic(true) + + dp := metric.Sum().DataPoints().AppendEmpty() + dp.SetDoubleValue(1) + dp.SetTimestamp(ts) + dp.SetStartTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_sum"}, + } + createdLabels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_sum" + createdSuffix}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 1, Timestamp: convertTimeStamp(ts)}, + }, + }, + timeSeriesSignature(createdLabels): { + Labels: createdLabels, + Samples: []prompb.Sample{ + {Value: float64(convertTimeStamp(ts)), Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + { + name: "monotonic cumulative sum with no start time", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_sum") + metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + metric.SetEmptySum().SetIsMonotonic(true) + + dp := metric.Sum().DataPoints().AppendEmpty() + dp.SetTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_sum"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + { + name: "non-monotonic cumulative sum with start time", + metric: func() pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName("test_sum") + metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + metric.SetEmptySum().SetIsMonotonic(false) + + dp := metric.Sum().DataPoints().AppendEmpty() + dp.SetTimestamp(ts) + + return metric + }, + want: func() map[uint64]*prompb.TimeSeries { + labels := []prompb.Label{ + {Name: model.MetricNameLabel, Value: "test_sum"}, + } + return map[uint64]*prompb.TimeSeries{ + timeSeriesSignature(labels): { + Labels: labels, + Samples: []prompb.Sample{ + {Value: 0, Timestamp: convertTimeStamp(ts)}, + }, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metric := tt.metric() + converter := NewPrometheusConverter() + + converter.addSumNumberDataPoints( + metric.Sum().DataPoints(), + pcommon.NewResource(), + metric, + Settings{ + ExportCreatedMetric: true, + }, + metric.Name(), + ) + + assert.Equal(t, tt.want(), converter.unique) + assert.Empty(t, converter.conflicts) + }) + } +} diff --git a/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go b/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go new file mode 100644 index 000000000..187127fcb --- /dev/null +++ b/storage/remote/otlptranslator/prometheusremotewrite/testutil_test.go @@ -0,0 +1,55 @@ +// Copyright 2024 The Prometheus 
Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Provenance-includes-location: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/247a9f996e09a83cdc25addf70c05e42b8b30186/pkg/translator/prometheusremotewrite/testutil_test.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Copyright The OpenTelemetry Authors. + +package prometheusremotewrite + +import ( + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +func getIntGaugeMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + dp := metric.SetEmptyGauge().DataPoints().AppendEmpty() + if strings.HasPrefix(name, "staleNaN") { + dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + } + dp.SetIntValue(value) + attributes.CopyTo(dp.Attributes()) + + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) + return metric +} + +func getIntSumMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() + metric.SetName(name) + metric.SetEmptySum().SetAggregationTemporality(pmetric.AggregationTemporalityCumulative) + dp := metric.Sum().DataPoints().AppendEmpty() + if strings.HasPrefix(name, "staleNaN") { + dp.SetFlags(pmetric.DefaultDataPointFlags.WithNoRecordedValue(true)) + } + dp.SetIntValue(value) + attributes.CopyTo(dp.Attributes()) + + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) + return metric +} diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 17ff1850f..b1c899726 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -1522,7 +1522,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { // Send batches of at most MaxSamplesPerSend samples to the remote storage. // If we have fewer samples than that, flush them out after a deadline anyways. var ( - max = s.qm.cfg.MaxSamplesPerSend + maxCount = s.qm.cfg.MaxSamplesPerSend pBuf = proto.NewBuffer(nil) pBufRaw []byte @@ -1530,19 +1530,19 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { ) // TODO(@tpaschalis) Should we also raise the max if we have WAL metadata? if s.qm.sendExemplars { - max += int(float64(max) * 0.1) + maxCount += int(float64(maxCount) * 0.1) } // TODO: Dry all of this, we should make an interface/generic for the timeseries type. 
batchQueue := queue.Chan() - pendingData := make([]prompb.TimeSeries, max) + pendingData := make([]prompb.TimeSeries, maxCount) for i := range pendingData { pendingData[i].Samples = []prompb.Sample{{}} if s.qm.sendExemplars { pendingData[i].Exemplars = []prompb.Exemplar{{}} } } - pendingDataV2 := make([]writev2.TimeSeries, max) + pendingDataV2 := make([]writev2.TimeSeries, maxCount) for i := range pendingDataV2 { pendingDataV2[i].Samples = []writev2.Sample{{}} } diff --git a/storage/remote/queue_manager_test.go b/storage/remote/queue_manager_test.go index 1c06173a5..032a1a92f 100644 --- a/storage/remote/queue_manager_test.go +++ b/storage/remote/queue_manager_test.go @@ -930,7 +930,7 @@ func createHistograms(numSamples, numSeries int, floatHistogram bool) ([]record. } func createSeriesMetadata(series []record.RefSeries) []record.RefMetadata { - metas := make([]record.RefMetadata, len(series)) + metas := make([]record.RefMetadata, 0, len(series)) for _, s := range series { metas = append(metas, record.RefMetadata{ diff --git a/storage/remote/read.go b/storage/remote/read.go index e54b14f1e..2ec48784d 100644 --- a/storage/remote/read.go +++ b/storage/remote/read.go @@ -165,11 +165,11 @@ func (q *querier) Select(ctx context.Context, sortSeries bool, hints *storage.Se return storage.ErrSeriesSet(fmt.Errorf("toQuery: %w", err)) } - res, err := q.client.Read(ctx, query) + res, err := q.client.Read(ctx, query, sortSeries) if err != nil { return storage.ErrSeriesSet(fmt.Errorf("remote_read: %w", err)) } - return newSeriesSetFilter(FromQueryResult(sortSeries, res), added) + return newSeriesSetFilter(res, added) } // addExternalLabels adds matchers for each external label. External labels diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go index a68187268..4cd4647e7 100644 --- a/storage/remote/read_handler_test.go +++ b/storage/remote/read_handler_test.go @@ -179,7 +179,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) { require.Equal(b, 2, recorder.Code/100) var results []*prompb.ChunkedReadResponse - stream := NewChunkedReader(recorder.Result().Body, DefaultChunkedReadLimit, nil) + stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil) for { res := &prompb.ChunkedReadResponse{} @@ -280,7 +280,7 @@ func TestStreamReadEndpoint(t *testing.T) { require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding")) var results []*prompb.ChunkedReadResponse - stream := NewChunkedReader(recorder.Result().Body, DefaultChunkedReadLimit, nil) + stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil) for { res := &prompb.ChunkedReadResponse{} err := stream.NextProto(res) diff --git a/storage/remote/read_test.go b/storage/remote/read_test.go index 357bdba1f..d63cefc3f 100644 --- a/storage/remote/read_test.go +++ b/storage/remote/read_test.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/testutil" ) @@ -198,7 +199,7 @@ type mockedRemoteClient struct { b labels.ScratchBuilder } -func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prompb.QueryResult, error) { +func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) { if c.got != nil { return nil, fmt.Errorf("expected only one call to 
remote client got: %v", query) } @@ -227,7 +228,7 @@ func (c *mockedRemoteClient) Read(_ context.Context, query *prompb.Query) (*prom q.Timeseries = append(q.Timeseries, &prompb.TimeSeries{Labels: s.Labels}) } } - return q, nil + return FromQueryResult(sortSeries, q), nil } func (c *mockedRemoteClient) reset() { diff --git a/storage/remote/storage.go b/storage/remote/storage.go index afa2d411a..05634f179 100644 --- a/storage/remote/storage.go +++ b/storage/remote/storage.go @@ -115,6 +115,7 @@ func (s *Storage) ApplyConfig(conf *config.Config) error { c, err := NewReadClient(name, &ClientConfig{ URL: rrConf.URL, Timeout: rrConf.RemoteTimeout, + ChunkedReadLimit: rrConf.ChunkedReadLimit, HTTPClientConfig: rrConf.HTTPClientConfig, Headers: rrConf.Headers, }) diff --git a/storage/remote/write_handler.go b/storage/remote/write_handler.go index aba79a561..58fb668cc 100644 --- a/storage/remote/write_handler.go +++ b/storage/remote/write_handler.go @@ -28,6 +28,7 @@ import ( "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -236,11 +237,16 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err b := labels.NewScratchBuilder(0) for _, ts := range req.Timeseries { ls := ts.ToLabels(&b, nil) - if !ls.Has(labels.MetricName) || !ls.IsValid() { + + // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are + // potentially written. Perhaps unify with fixed writeV2 implementation a bit. + if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ - // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are - // potentially written. Perhaps unify with fixed writeV2 implementation a bit. + continue + } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { + level.Warn(h.logger).Log("msg", "Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) + samplesWithInvalidLabels++ continue } @@ -375,10 +381,14 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Validate series labels early. // NOTE(bwplotka): While spec allows UTF-8, Prometheus Receiver may impose // specific limits and follow https://prometheus.io/docs/specs/remote_write_spec_2_0/#invalid-samples case. 
- if !ls.Has(labels.MetricName) || !ls.IsValid() { + if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid metric name or labels, got %v", ls.String())) samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) continue + } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { + badRequestErrs = append(badRequestErrs, fmt.Errorf("invalid labels for series, labels %v, duplicated label %s", ls.String(), duplicateLabel)) + samplesWithInvalidLabels += len(ts.Samples) + len(ts.Histograms) + continue } allSamplesSoFar := rs.AllSamples() @@ -502,12 +512,17 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { otlpCfg := h.configFunc().OTLPConfig converter := otlptranslator.NewPrometheusConverter() - if err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{ + annots, err := converter.FromMetrics(req.Metrics(), otlptranslator.Settings{ AddMetricSuffixes: true, PromoteResourceAttributes: otlpCfg.PromoteResourceAttributes, - }); err != nil { + }) + if err != nil { level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) } + ws, _ := annots.AsStrings("", 0, 0) + if len(ws) > 0 { + level.Warn(h.logger).Log("msg", "Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + } err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ Timeseries: converter.TimeSeries(), diff --git a/storage/remote/write_handler_test.go b/storage/remote/write_handler_test.go index af2229b9a..5c89a1ab9 100644 --- a/storage/remote/write_handler_test.go +++ b/storage/remote/write_handler_test.go @@ -338,6 +338,15 @@ func TestRemoteWriteHandler_V2Message(t *testing.T) { expectedCode: http.StatusBadRequest, expectedRespBody: "invalid metric name or labels, got {__name__=\"\"}\n", }, + { + desc: "Partial write; first series with duplicate labels", + input: append( + // Series with __name__="test_metric1",test_metric1="test_metric1",test_metric1="test_metric1" labels. 
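+			// LabelsRefs lists label name/value pairs as references into the
+			// fixture's symbol table, so the repeated {2, 2} pairs encode the
+			// duplicated test_metric1 label that this case exercises.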
+			[]writev2.TimeSeries{{LabelsRefs: []uint32{1, 2, 2, 2, 2, 2}, Samples: []writev2.Sample{{Value: 1, Timestamp: 1}}}},
+			writeV2RequestFixture.Timeseries...),
+			expectedCode:     http.StatusBadRequest,
+			expectedRespBody: "invalid labels for series, labels {__name__=\"test_metric1\", test_metric1=\"test_metric1\", test_metric1=\"test_metric1\"}, duplicated label test_metric1\n",
+		},
 		{
 			desc: "Partial write; first series with one OOO sample",
 			input: func() []writev2.TimeSeries {
@@ -453,10 +462,10 @@
 			expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenHistogramsHeader))
 			expectHeaderValue(t, 0, resp.Header.Get(rw20WrittenExemplarsHeader))
 
-			require.Empty(t, len(appendable.samples))
-			require.Empty(t, len(appendable.histograms))
-			require.Empty(t, len(appendable.exemplars))
-			require.Empty(t, len(appendable.metadata))
+			require.Empty(t, appendable.samples)
+			require.Empty(t, appendable.histograms)
+			require.Empty(t, appendable.exemplars)
+			require.Empty(t, appendable.metadata)
 			return
 		}
 
@@ -836,6 +845,13 @@ func (m *mockAppendable) Append(_ storage.SeriesRef, l labels.Labels, t int64, v
 		return 0, storage.ErrDuplicateSampleForTimestamp
 	}
 
+	if l.IsEmpty() {
+		return 0, tsdb.ErrInvalidSample
+	}
+	if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
+		return 0, tsdb.ErrInvalidSample
+	}
+
 	m.latestSample[l.Hash()] = t
 	m.samples = append(m.samples, mockSample{l, t, v})
 	return 0, nil
@@ -887,6 +903,13 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
 		return 0, storage.ErrDuplicateSampleForTimestamp
 	}
 
+	if l.IsEmpty() {
+		return 0, tsdb.ErrInvalidSample
+	}
+	if _, hasDuplicates := l.HasDuplicateLabelNames(); hasDuplicates {
+		return 0, tsdb.ErrInvalidSample
+	}
+
 	m.latestHistogram[l.Hash()] = t
 	m.histograms = append(m.histograms, mockHistogram{l, t, h, fh})
 	return 0, nil
diff --git a/storage/series_test.go b/storage/series_test.go
index 6995468b4..f8ba2af67 100644
--- a/storage/series_test.go
+++ b/storage/series_test.go
@@ -72,7 +72,7 @@ func TestListSeriesIterator(t *testing.T) {
 	require.Equal(t, chunkenc.ValNone, it.Seek(2))
 }
 
-// TestSeriesSetToChunkSet test the property of SeriesSet that says
+// TestChunkSeriesSetToSeriesSet tests the property of SeriesSet that says
 // returned series should be iterable even after Next is called.
 func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 	series := []struct {
@@ -126,6 +126,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 
 type histogramTest struct {
 	samples                     []chunks.Sample
+	expectedSamples             []chunks.Sample
 	expectedCounterResetHeaders []chunkenc.CounterResetHeader
 }
 
@@ -141,6 +142,32 @@ func TestHistogramSeriesToChunks(t *testing.T) {
 		},
 		PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
 	}
+	// h1 but with an extra empty bucket at offset -10.
+	// This can happen if h1 is from a recoded chunk, where a later histogram had a bucket at offset -10.
+	h1ExtraBuckets := &histogram.Histogram{
+		Count:         7,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: -10, Length: 1},
+			{Offset: 9, Length: 2},
+		},
+		PositiveBuckets: []int64{0, 2, 1}, // Abs: 0, 2, 3
+	}
+	h1Recoded := &histogram.Histogram{
+		Count:         7,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0
+	}
 	// Appendable to h1.
 	h2 := &histogram.Histogram{
 		Count: 12,
@@ -179,6 +206,32 @@
 		},
 		PositiveBuckets: []float64{3, 1},
 	}
+	// fh1 but with an extra empty bucket at offset -10.
+	// This can happen if fh1 is from a recoded chunk, where a later histogram had a bucket at offset -10.
+	fh1ExtraBuckets := &histogram.FloatHistogram{
+		Count:         6,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: -10, Length: 1},
+			{Offset: 9, Length: 2},
+		},
+		PositiveBuckets: []float64{0, 3, 1},
+	}
+	fh1Recoded := &histogram.FloatHistogram{
+		Count:         6,
+		ZeroCount:     2,
+		ZeroThreshold: 0.001,
+		Sum:           100,
+		Schema:        0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{3, 1, 0, 0},
+	}
 	// Appendable to fh1.
 	fh2 := &histogram.FloatHistogram{
 		Count: 17,
@@ -219,6 +272,20 @@
 		},
 		PositiveBuckets: []int64{2, 1}, // Abs: 2, 3
 	}
+	// gh1 recoded to add extra empty buckets at the end.
+	gh1Recoded := &histogram.Histogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            7,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []int64{2, 1, -3, 0}, // Abs: 2, 3, 0, 0
+	}
 	gh2 := &histogram.Histogram{
 		CounterResetHint: histogram.GaugeType,
 		Count:            12,
@@ -246,6 +313,20 @@
 		},
 		PositiveBuckets: []float64{3, 1},
 	}
+	// gfh1 recoded to add extra empty buckets at the end.
+	gfh1Recoded := &histogram.FloatHistogram{
+		CounterResetHint: histogram.GaugeType,
+		Count:            6,
+		ZeroCount:        2,
+		ZeroThreshold:    0.001,
+		Sum:              100,
+		Schema:           0,
+		PositiveSpans: []histogram.Span{
+			{Offset: 0, Length: 2},
+			{Offset: 1, Length: 2},
+		},
+		PositiveBuckets: []float64{3, 1, 0, 0},
+	}
 	gfh2 := &histogram.FloatHistogram{
 		CounterResetHint: histogram.GaugeType,
 		Count:            17,
@@ -272,6 +353,9 @@
 			samples: []chunks.Sample{
 				hSample{t: 1, h: h1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to a single chunk": {
@@ -279,6 +363,10 @@
 				hSample{t: 1, h: h1},
 				hSample{t: 2, h: h2},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h1Recoded},
+				hSample{t: 2, h: h2},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset},
 		},
 		"two histograms encoded to two chunks": {
@@ -286,6 +374,10 @@
 				hSample{t: 1, h: h2},
 				hSample{t: 2, h: h1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: h2},
+				hSample{t: 2, h: h1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset},
 		},
 		"histogram and stale sample encoded to two chunks": {
@@ -293,6 +385,10 @@
 				hSample{t: 1, h: staleHistogram},
 				hSample{t: 2, h: h1},
 			},
+			expectedSamples: []chunks.Sample{
+				hSample{t: 1, h: staleHistogram},
+				hSample{t: 2, h: h1},
+			},
 			expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset},
 		},
 		"histogram and reduction in bucket encoded to two chunks": {
@@ -300,6 +396,10 @@
 				hSample{t: 1,
h: h1}, hSample{t: 2, h: h2down}, }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: h1}, + hSample{t: 2, h: h2down}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, // Float histograms. @@ -307,6 +407,9 @@ func TestHistogramSeriesToChunks(t *testing.T) { samples: []chunks.Sample{ fhSample{t: 1, fh: fh1}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: fh1}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to a single chunk": { @@ -314,6 +417,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: fh1Recoded}, + fhSample{t: 2, fh: fh2}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, }, "two float histograms encoded to two chunks": { @@ -321,6 +428,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: fh2}, fhSample{t: 2, fh: fh1}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: fh2}, + fhSample{t: 2, fh: fh1}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, "float histogram and stale sample encoded to two chunks": { @@ -328,6 +439,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: staleFloatHistogram}, fhSample{t: 2, fh: fh1}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: staleFloatHistogram}, + fhSample{t: 2, fh: fh1}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and reduction in bucket encoded to two chunks": { @@ -335,6 +450,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: fh1}, fhSample{t: 2, fh: fh2down}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: fh1}, + fhSample{t: 2, fh: fh2down}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.CounterReset}, }, // Mixed. 
@@ -343,6 +462,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 1, h: h1}, fhSample{t: 2, fh: fh2}, }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: h1}, + fhSample{t: 2, fh: fh2}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "float histogram and histogram encoded to two chunks": { @@ -350,6 +473,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: fh1}, hSample{t: 2, h: h2}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: fh1}, + hSample{t: 2, h: h2}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "histogram and stale float histogram encoded to two chunks": { @@ -357,12 +484,19 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 1, h: h1}, fhSample{t: 2, fh: staleFloatHistogram}, }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: h1}, + fhSample{t: 2, fh: staleFloatHistogram}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset, chunkenc.UnknownCounterReset}, }, "single gauge histogram encoded to one chunk": { samples: []chunks.Sample{ hSample{t: 1, h: gh1}, }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: gh1}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two gauge histograms encoded to one chunk when counter increases": { @@ -370,6 +504,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 1, h: gh1}, hSample{t: 2, h: gh2}, }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: gh1Recoded}, + hSample{t: 2, h: gh2}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two gauge histograms encoded to one chunk when counter decreases": { @@ -377,12 +515,19 @@ func TestHistogramSeriesToChunks(t *testing.T) { hSample{t: 1, h: gh2}, hSample{t: 2, h: gh1}, }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: gh2}, + hSample{t: 2, h: gh1Recoded}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "single gauge float histogram encoded to one chunk": { samples: []chunks.Sample{ fhSample{t: 1, fh: gfh1}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: gfh1}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two float gauge histograms encoded to one chunk when counter increases": { @@ -390,6 +535,10 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: gfh1}, fhSample{t: 2, fh: gfh2}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: gfh1Recoded}, + fhSample{t: 2, fh: gfh2}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, "two float gauge histograms encoded to one chunk when counter decreases": { @@ -397,8 +546,34 @@ func TestHistogramSeriesToChunks(t *testing.T) { fhSample{t: 1, fh: gfh2}, fhSample{t: 2, fh: gfh1}, }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: gfh2}, + fhSample{t: 2, fh: gfh1Recoded}, + }, expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.GaugeType}, }, + "histogram with extra empty bucket followed by histogram encodes to one chunk": { + samples: []chunks.Sample{ + hSample{t: 1, h: h1ExtraBuckets}, + hSample{t: 2, h: h1}, + }, + expectedSamples: []chunks.Sample{ + hSample{t: 1, h: h1ExtraBuckets}, + hSample{t: 2, h: h1ExtraBuckets}, // Recoded to add the missing buckets. 
+ }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, + }, + "float histogram with extra empty bucket followed by float histogram encodes to one chunk": { + samples: []chunks.Sample{ + fhSample{t: 1, fh: fh1ExtraBuckets}, + fhSample{t: 2, fh: fh1}, + }, + expectedSamples: []chunks.Sample{ + fhSample{t: 1, fh: fh1ExtraBuckets}, + fhSample{t: 2, fh: fh1ExtraBuckets}, // Recoded to add the missing buckets. + }, + expectedCounterResetHeaders: []chunkenc.CounterResetHeader{chunkenc.UnknownCounterReset}, + }, } for testName, test := range tests { @@ -431,9 +606,9 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { // Decode all encoded samples and assert they are equal to the original ones. encodedSamples := chunks.ChunkMetasToSamples(chks) - require.Equal(t, len(test.samples), len(encodedSamples)) + require.Equal(t, len(test.expectedSamples), len(encodedSamples)) - for i, s := range test.samples { + for i, s := range test.expectedSamples { encodedSample := encodedSamples[i] switch expectedSample := s.(type) { case hSample: @@ -447,7 +622,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { require.True(t, value.IsStaleNaN(h.Sum), fmt.Sprintf("at idx %d", i)) continue } - require.Equal(t, *expectedSample.h, *h.Compact(0), fmt.Sprintf("at idx %d", i)) + require.Equal(t, *expectedSample.h, *h, fmt.Sprintf("at idx %d", i)) case fhSample: require.Equal(t, chunkenc.ValFloatHistogram, encodedSample.Type(), "expect float histogram", fmt.Sprintf("at idx %d", i)) fh := encodedSample.FH() @@ -459,7 +634,7 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) { require.True(t, value.IsStaleNaN(fh.Sum), fmt.Sprintf("at idx %d", i)) continue } - require.Equal(t, *expectedSample.fh, *fh.Compact(0), fmt.Sprintf("at idx %d", i)) + require.Equal(t, *expectedSample.fh, *fh, fmt.Sprintf("at idx %d", i)) default: t.Error("internal error, unexpected type") } diff --git a/template/template.go b/template/template.go index dbe1607cf..0698c6c8a 100644 --- a/template/template.go +++ b/template/template.go @@ -23,7 +23,6 @@ import ( "net" "net/url" "sort" - "strconv" "strings" text_template "text/template" "time" @@ -106,25 +105,6 @@ func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (quer return result, nil } -func convertToFloat(i interface{}) (float64, error) { - switch v := i.(type) { - case float64: - return v, nil - case string: - return strconv.ParseFloat(v, 64) - case int: - return float64(v), nil - case uint: - return float64(v), nil - case int64: - return float64(v), nil - case uint64: - return float64(v), nil - default: - return 0, fmt.Errorf("can't convert %T to float", v) - } -} - // Expander executes templates in text or HTML mode with a common set of Prometheus template functions. type Expander struct { text string @@ -186,7 +166,7 @@ func NewTemplateExpander( return html_template.HTML(text) }, "match": regexp.MatchString, - "title": strings.Title, //nolint:staticcheck + "title": strings.Title, //nolint:staticcheck // TODO(beorn7): Need to come up with a replacement using the cases package. 
"toUpper": strings.ToUpper, "toLower": strings.ToLower, "graphLink": strutil.GraphLinkForExpression, @@ -219,7 +199,7 @@ func NewTemplateExpander( return host }, "humanize": func(i interface{}) (string, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err } @@ -248,7 +228,7 @@ func NewTemplateExpander( return fmt.Sprintf("%.4g%s", v, prefix), nil }, "humanize1024": func(i interface{}) (string, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err } @@ -267,30 +247,15 @@ func NewTemplateExpander( }, "humanizeDuration": common_templates.HumanizeDuration, "humanizePercentage": func(i interface{}) (string, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return "", err } return fmt.Sprintf("%.4g%%", v*100), nil }, - "humanizeTimestamp": func(i interface{}) (string, error) { - v, err := convertToFloat(i) - if err != nil { - return "", err - } - - tm, err := floatToTime(v) - switch { - case errors.Is(err, errNaNOrInf): - return fmt.Sprintf("%.4g", v), nil - case err != nil: - return "", err - } - - return fmt.Sprint(tm), nil - }, + "humanizeTimestamp": common_templates.HumanizeTimestamp, "toTime": func(i interface{}) (*time.Time, error) { - v, err := convertToFloat(i) + v, err := common_templates.ConvertToFloat(i) if err != nil { return nil, err } diff --git a/tracing/testdata/ca.cer b/tracing/testdata/ca.cer index 86f627a90..dbbd009d4 100644 --- a/tracing/testdata/ca.cer +++ b/tracing/testdata/ca.cer @@ -1,8 +1,66 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 93:6c:9e:29:8d:37:7b:66 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA + Validity + Not Before: Aug 20 11:51:23 2024 GMT + Not After : Dec 5 11:51:23 2044 GMT + Subject: C = XX, L = Default City, O = Default Company Ltd, CN = Prometheus Test CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:e9:52:05:4d:f2:5a:95:04:2d:b8:73:8b:3c:e7: + 47:48:67:00:be:dd:6c:41:f3:7c:4b:44:73:a3:77: + 3e:84:af:30:d7:2a:ad:45:6a:b7:89:23:05:15:b3: + aa:46:79:b8:95:64:cc:13:c4:44:a1:01:a0:e2:3d: + a5:67:2b:aa:d3:13:06:43:33:1c:96:36:12:9e:c6: + 1d:36:9b:d7:47:bd:28:2d:88:15:04:fa:14:a3:ff: + 8c:26:22:c5:a2:15:c7:76:b3:11:f6:a3:44:9a:28: + 0f:ca:fb:f4:51:a8:6a:05:94:7c:77:47:c8:21:56: + 25:bf:e2:2e:df:33:f3:e4:bd:d6:47:a5:49:13:12: + c8:1f:4c:d7:2a:56:a2:6c:c1:cf:55:05:5d:9a:75: + a2:23:4e:e6:8e:ff:76:05:d6:e0:c8:0b:51:f0:b6: + a1:b2:7d:8f:9c:6a:a5:ce:86:92:15:8c:5b:86:45: + c0:4a:ff:54:b8:ee:cf:11:bd:07:cb:4b:7d:0b:a1: + 9d:72:86:9f:55:bc:f9:6c:d9:55:60:96:30:3f:ec: + 2d:f6:5f:9a:32:9a:5a:5c:1c:5f:32:f9:d6:0f:04: + f8:81:08:04:9a:95:c3:9d:5a:30:8e:a5:0e:47:2f: + 00:ce:e0:2e:ad:5a:b8:b6:4c:55:7c:8a:59:22:b0: + ed:73 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B + X509v3 Authority Key Identifier: + CC:F5:05:99:E5:AB:12:69:D8:78:89:4A:31:CA:F0:8B:0B:AD:66:1B + X509v3 Basic Constraints: + CA:TRUE + Signature Algorithm: sha256WithRSAEncryption + Signature Value: + 4a:a1:b0:bc:c8:87:4f:7c:96:62:e5:09:29:ae:3a:2e:68:d0: + d2:c5:68:ed:ea:83:36:b1:86:f3:b9:e9:19:2b:b6:73:10:6f: + df:7f:bb:f1:76:81:03:c1:a1:5a:ee:6c:44:b8:7c:10:d1:5a: + d7:c1:92:64:59:35:a6:e0:aa:08:41:37:6e:e7:c8:b6:bd:0c: + 
4b:47:78:ec:c4:b4:15:a3:62:76:4a:39:8e:6e:19:ff:f0:c0: + 8a:7e:1c:cd:87:e5:00:6c:f1:ce:27:26:ff:b8:e9:eb:f7:2f: + bd:c2:4b:9c:d6:57:de:74:74:b3:4f:03:98:9a:b5:08:2d:16: + ca:7f:b6:c8:76:62:86:1b:7c:f2:3e:6c:78:cc:2c:95:9a:bb: + 77:25:e8:80:ff:9b:e8:f8:9a:85:3b:85:b7:17:4e:77:a1:cf: + 4d:b9:d0:25:e8:5d:8c:e6:7c:f1:d9:52:30:3d:ec:2b:37:91: + bc:e2:e8:39:31:6f:3d:e9:98:70:80:7c:41:dd:19:13:05:21: + 94:7b:16:cf:d8:ee:4e:38:34:5e:6a:ff:cd:85:ac:8f:94:9a: + dd:4e:77:05:13:a6:b4:80:52:b2:97:64:76:88:f4:dd:42:0a: + 50:1c:80:fd:4b:6e:a9:62:10:aa:ef:2e:c1:2f:be:0e:c2:2e: + b5:28:5f:83 -----BEGIN CERTIFICATE----- MIIDkTCCAnmgAwIBAgIJAJNsnimNN3tmMA0GCSqGSIb3DQEBCwUAMF8xCzAJBgNV BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg -Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0xNTA4 -MDQxNDA5MjFaFw0yNTA4MDExNDA5MjFaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH +Q29tcGFueSBMdGQxGzAZBgNVBAMMElByb21ldGhldXMgVGVzdCBDQTAeFw0yNDA4 +MjAxMTUxMjNaFw00NDEyMDUxMTUxMjNaMF8xCzAJBgNVBAYTAlhYMRUwEwYDVQQH DAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxGzAZ BgNVBAMMElByb21ldGhldXMgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP ADCCAQoCggEBAOlSBU3yWpUELbhzizznR0hnAL7dbEHzfEtEc6N3PoSvMNcqrUVq @@ -12,11 +70,11 @@ yB9M1ypWomzBz1UFXZp1oiNO5o7/dgXW4MgLUfC2obJ9j5xqpc6GkhWMW4ZFwEr/ VLjuzxG9B8tLfQuhnXKGn1W8+WzZVWCWMD/sLfZfmjKaWlwcXzL51g8E+IEIBJqV w51aMI6lDkcvAM7gLq1auLZMVXyKWSKw7XMCAwEAAaNQME4wHQYDVR0OBBYEFMz1 BZnlqxJp2HiJSjHK8IsLrWYbMB8GA1UdIwQYMBaAFMz1BZnlqxJp2HiJSjHK8IsL -rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAI2iA3w3TK5J15Pu -e4fPFB4jxQqsbUwuyXbCCv/jKLeFNCD4BjM181WZEYjPMumeTBVzU3aF45LWQIG1 -0DJcrCL4mjMz9qgAoGqA7aDDXiJGbukMgYYsn7vrnVmrZH8T3E8ySlltr7+W578k -pJ5FxnbCroQwn0zLyVB3sFbS8E3vpBr3L8oy8PwPHhIScexcNVc3V6/m4vTZsXTH -U+vUm1XhDgpDcFMTg2QQiJbfpOYUkwIgnRDAT7t282t2KQWtnlqc3zwPQ1F/6Cpx -j19JeNsaF1DArkD7YlyKj/GhZLtHwFHG5cxznH0mLDJTW7bQvqqh2iQTeXmBk1lU -mM5lH/s= +rWYbMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAEqhsLzIh098lmLl +CSmuOi5o0NLFaO3qgzaxhvO56RkrtnMQb99/u/F2gQPBoVrubES4fBDRWtfBkmRZ +NabgqghBN27nyLa9DEtHeOzEtBWjYnZKOY5uGf/wwIp+HM2H5QBs8c4nJv+46ev3 +L73CS5zWV950dLNPA5iatQgtFsp/tsh2YoYbfPI+bHjMLJWau3cl6ID/m+j4moU7 +hbcXTnehz0250CXoXYzmfPHZUjA97Cs3kbzi6Dkxbz3pmHCAfEHdGRMFIZR7Fs/Y +7k44NF5q/82FrI+Umt1OdwUTprSAUrKXZHaI9N1CClAcgP1LbqliEKrvLsEvvg7C +LrUoX4M= -----END CERTIFICATE----- diff --git a/tsdb/agent/db.go b/tsdb/agent/db.go index 1b6df3af0..9697739e0 100644 --- a/tsdb/agent/db.go +++ b/tsdb/agent/db.go @@ -1118,7 +1118,7 @@ func (a *appender) logSeries() error { return nil } -// mintTs returns the minimum timestamp that a sample can have +// minValidTime returns the minimum timestamp that a sample can have // and is needed for preventing underflow. 
func (a *appender) minValidTime(lastTs int64) int64 {
	if lastTs < math.MinInt64+a.opts.OutOfOrderTimeWindow {
diff --git a/tsdb/chunkenc/chunk.go b/tsdb/chunkenc/chunk.go
index 1421f3b39..7082f34c3 100644
--- a/tsdb/chunkenc/chunk.go
+++ b/tsdb/chunkenc/chunk.go
@@ -213,7 +213,7 @@ func MockSeriesIterator(timestamps []int64, values []float64) Iterator {
 	return &mockSeriesIterator{
 		timeStamps: timestamps,
 		values:     values,
-		currIndex:  0,
+		currIndex:  -1,
 	}
 }
diff --git a/tsdb/chunkenc/float_histogram.go b/tsdb/chunkenc/float_histogram.go
index a7c1fffb1..f18eb77da 100644
--- a/tsdb/chunkenc/float_histogram.go
+++ b/tsdb/chunkenc/float_histogram.go
@@ -219,16 +219,25 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
 }
 
 // appendable returns whether the chunk can be appended to, and if so whether
-// any recoding needs to happen using the provided inserts (in case of any new
-// buckets, positive or negative range, respectively). If the sample is a gauge
-// histogram, AppendableGauge must be used instead.
+// 1. Any recoding needs to happen to the chunk using the provided forward
+// inserts (in case of any new buckets, positive or negative range,
+// respectively).
+// 2. Any recoding needs to happen for the histogram being appended, using the
+// backward inserts (in case of any missing buckets, positive or negative
+// range, respectively).
+//
+// If the sample is a gauge histogram, AppendableGauge must be used instead.
 //
 // The chunk is not appendable in the following cases:
+//
 // - The schema has changed.
 // - The custom bounds have changed if the current schema is custom buckets.
 // - The threshold for the zero bucket has changed.
-// - Any buckets have disappeared.
-// - There was a counter reset in the count of observations or in any bucket, including the zero bucket.
+// - Any buckets have disappeared, unless the bucket count was 0 (unused). An
+//   empty bucket can happen if the chunk was recoded and we're merging a
+//   non-recoded histogram. In this case backward inserts will be provided.
+// - There was a counter reset in the count of observations or in any bucket,
+//   including the zero bucket.
 // - The last sample in the chunk was stale while the current sample is not stale.
 //
 // The method returns an additional boolean set to true if it is not appendable
@@ -236,6 +245,7 @@ func (a *FloatHistogramAppender) Append(int64, float64) {
 // append. If counterReset is true, okToAppend is always false.
func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
	okToAppend, counterReset bool,
) {
	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@@ -279,27 +289,218 @@ func (a *FloatHistogramAppender) appendable(h *histogram.FloatHistogram) (
	}

	var ok bool
-	positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
+	positiveInserts, backwardPositiveInserts, ok = expandFloatSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
	if !ok {
		counterReset = true
		return
	}
-	negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
+	negativeInserts, backwardNegativeInserts, ok = expandFloatSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
	if !ok {
		counterReset = true
		return
	}

-	if counterResetInAnyFloatBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
-		counterResetInAnyFloatBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
-		counterReset, positiveInserts, negativeInserts = true, nil, nil
-		return
-	}
-
	okToAppend = true
	return
}

+// expandFloatSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
+// The function also returns the inserts to expand 'b' to also cover all the
+// buckets that are missing in 'b', but are present with 0 counter value in 'a'.
+// It also checks for counter resets between 'a' and 'b'.
+//
+// Example:
+//
+// Let's say the old buckets look like this:
+//
+// span syntax: [offset, length]
+// spans      : [ 0 , 2 ]               [2,1]                [ 3 , 2 ]                     [3,1]       [1,1]
+// bucket idx : [0]   [1]    2     3    [4]    5     6     7   [8]    [9]   10    11    12   [13]   14   [15]
+// raw values    6     3                 3                      2      4                      5           1
+// deltas        6    -3                 0                     -1      2                      1          -4
+//
+// But now we introduce a new bucket layout. (Carefully chosen example where we
+// have a span appended, one unchanged[*], one prepended, and two merged - in
+// that order.)
+//
+// [*] Unchanged in terms of which bucket indices they represent, but to achieve
+// that, their offset needs to change if "disrupted" by spans changing ahead of
+// them.
+//
+//                                      \/ this one is "unchanged"
+// spans      : [  0  ,  3    ]         [1,1]       [ 1 , 4 ]               [ 3 ,  3  ]
+// bucket idx : [0]   [1]   [2]    3    [4]    5    [6]   [7]   [8]   [9]   10    11    12   [13]   [14]   [15]
+// raw values    6     3     0           3           0     0     2     4                      5      0      1
+// deltas        6    -3    -3           3          -3     0     2     2                      1     -5      1
+// delta mods:                          / \         / \                                      / \
+//
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
+//
+// Note: Within expandFloatSpansAndBuckets we don't have to worry about the
+// changes to the spans themselves, thanks to the iterators we get to work with
+// the more useful bucket indices (which of course directly correspond to the
+// buckets we have to adjust).
+func expandFloatSpansAndBuckets(a, b []histogram.Span, aBuckets []xorValue, bBuckets []float64) (forward, backward []Insert, ok bool) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
+	var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
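+	// For example: if a has buckets at indices 0, 2, 3 with counts 5, 0, 4 and
+	// b has buckets at indices 0, 3 with counts 6, 4, then bucket index 2 is
+	// missing from b but was unused (count 0) in a. The result is no forward
+	// inserts and a single backward insert {pos: 1, num: 1, bucketIdx: 2},
+	// telling the caller to splice one empty bucket into b. Had bucket 2 held
+	// a non-zero count in a, its disappearance would be a counter reset and
+	// ok would be false.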
+
+	// When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should
+	// be yielded when we finish a streak of new buckets.
+	var aInter Insert
+	var bInter Insert
+
+	aIdx, aOK := ai.Next()
+	bIdx, bOK := bi.Next()
+
+	// Bucket count. Initialize the absolute count and index into the
+	// positive/negative counts or deltas array. The bucket count is
+	// used to detect counter reset as well as unused buckets in a.
+	var (
+		aCount    float64
+		bCount    float64
+		aCountIdx int
+		bCountIdx int
+	)
+	if aOK {
+		aCount = aBuckets[aCountIdx].value
+	}
+	if bOK {
+		bCount = bBuckets[bCountIdx]
+	}
+
+loop:
+	for {
+		switch {
+		case aOK && bOK:
+			switch {
+			case aIdx == bIdx: // Both have an identical bucket index.
+				// Bucket count. Check bucket for reset from a to b.
+				if aCount > bCount {
+					return nil, nil, false
+				}
+
+				// Finish WIP insert for a and reset.
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+
+				// Finish WIP insert for b and reset.
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+
+				aIdx, aOK = ai.Next()
+				bIdx, bOK = bi.Next()
+				aInter.pos++ // Advance potential insert position.
+				aCountIdx++  // Advance absolute bucket count index for a.
+				if aOK {
+					aCount = aBuckets[aCountIdx].value
+				}
+				bInter.pos++ // Advance potential insert position.
+				bCountIdx++  // Advance absolute bucket count index for b.
+				if bOK {
+					bCount = bBuckets[bCountIdx]
+				}
+
+				continue
+			case aIdx < bIdx: // b misses a bucket index that is in a.
+				// This is ok if the count in a is 0, in which case we make a note to
+				// fill in the bucket in b and advance a.
+				if aCount == 0 {
+					bInter.num++ // Mark that we need to insert a bucket in b.
+					bInter.bucketIdx = aIdx
+					// Advance a
+					if aInter.num > 0 {
+						aInserts = append(aInserts, aInter)
+						aInter.num = 0
+					}
+					aIdx, aOK = ai.Next()
+					aInter.pos++
+					aCountIdx++
+					if aOK {
+						aCount = aBuckets[aCountIdx].value
+					}
+					continue
+				}
+				// Otherwise we are missing a bucket that was in use in a, which is a reset.
+				return nil, nil, false
+			case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare.
+				aInter.num++
+				aInter.bucketIdx = bIdx
+				// Advance b
+				if bInter.num > 0 {
+					bInserts = append(bInserts, bInter)
+					bInter.num = 0
+				}
+				bIdx, bOK = bi.Next()
+				bInter.pos++
+				bCountIdx++
+				if bOK {
+					bCount = bBuckets[bCountIdx]
+				}
+			}
+		case aOK && !bOK: // b misses a value that is in a.
+			// This is ok if the count in a is 0, in which case we make a note to
+			// fill in the bucket in b and advance a.
+			if aCount == 0 {
+				bInter.num++
+				bInter.bucketIdx = aIdx
+				// Advance a
+				if aInter.num > 0 {
+					aInserts = append(aInserts, aInter)
+					aInter.num = 0
+				}
+				aIdx, aOK = ai.Next()
+				aInter.pos++ // Advance potential insert position.
+				// Update absolute bucket counts for a.
+				aCountIdx++
+				if aOK {
+					aCount = aBuckets[aCountIdx].value
+				}
+				continue
+			}
+			// Otherwise we are missing a bucket that was in use in a, which is a reset.
+			return nil, nil, false
+		case !aOK && bOK: // a misses a value that is in b. Forward b and recompare.
+			aInter.num++
+			aInter.bucketIdx = bIdx
+			// Advance b
+			if bInter.num > 0 {
+				bInserts = append(bInserts, bInter)
+				bInter.num = 0
+			}
+			bIdx, bOK = bi.Next()
+			bInter.pos++ // Advance potential insert position.
+			// Update absolute bucket counts for b.
+			bCountIdx++
+			if bOK {
+				bCount = bBuckets[bCountIdx]
+			}
+		default: // Both iterators ran out. We're done.
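+			// Flush any work-in-progress inserts; a streak of missing
+			// buckets may still be pending when the spans end.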
+ if aInter.num > 0 { + aInserts = append(aInserts, aInter) + } + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + } + break loop + } + } + + return aInserts, bInserts, true +} + // appendableGauge returns whether the chunk can be appended to, and if so // whether: // 1. Any recoding needs to happen to the chunk using the provided inserts @@ -349,76 +550,6 @@ func (a *FloatHistogramAppender) appendableGauge(h *histogram.FloatHistogram) ( return } -// counterResetInAnyFloatBucket returns true if there was a counter reset for any -// bucket. This should be called only when the bucket layout is the same or new -// buckets were added. It does not handle the case of buckets missing. -func counterResetInAnyFloatBucket(oldBuckets []xorValue, newBuckets []float64, oldSpans, newSpans []histogram.Span) bool { - if len(oldSpans) == 0 || len(oldBuckets) == 0 { - return false - } - - var ( - oldSpanSliceIdx, newSpanSliceIdx int = -1, -1 // Index for the span slices. Starts at -1 to indicate that the first non empty span is not yet found. - oldInsideSpanIdx, newInsideSpanIdx uint32 // Index inside a span. - oldIdx, newIdx int32 // Index inside a bucket slice. - oldBucketSliceIdx, newBucketSliceIdx int // Index inside bucket slice. - ) - - // Find first non empty spans. - oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans) - newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans) - oldVal, newVal := oldBuckets[0].value, newBuckets[0] - - // Since we assume that new spans won't have missing buckets, there will never be a case - // where the old index will not find a matching new index. - for { - if oldIdx == newIdx { - if newVal < oldVal { - return true - } - } - - if oldIdx <= newIdx { - // Moving ahead old bucket and span by 1 index. - if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length { - // Current span is over. - oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans) - oldInsideSpanIdx = 0 - if oldSpanSliceIdx >= len(oldSpans) { - // All old spans are over. - break - } - } else { - oldInsideSpanIdx++ - oldIdx++ - } - oldBucketSliceIdx++ - oldVal = oldBuckets[oldBucketSliceIdx].value - } - - if oldIdx > newIdx { - // Moving ahead new bucket and span by 1 index. - if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length { - // Current span is over. - newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans) - newInsideSpanIdx = 0 - if newSpanSliceIdx >= len(newSpans) { - // All new spans are over. - // This should not happen, old spans above should catch this first. - panic("new spans over before old spans in counterReset") - } - } else { - newInsideSpanIdx++ - newIdx++ - } - newBucketSliceIdx++ - newVal = newBuckets[newBucketSliceIdx] - } - } - - return false -} - // appendFloatHistogram appends a float histogram to the chunk. The caller must ensure that // the histogram is properly structured, e.g. the number of buckets used // corresponds to the number conveyed by the span structures. First call @@ -614,7 +745,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend a.setCounterResetHeader(CounterReset) case prev != nil: // This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set. 
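+		// Only the counterReset flag is of interest here; the forward and
+		// backward insert lists returned by appendable are discarded.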
-		_, _, _, counterReset := prev.appendable(h)
+		_, _, _, _, _, counterReset := prev.appendable(h)
		if counterReset {
			a.setCounterResetHeader(CounterReset)
		} else {
@@ -626,7 +757,7 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend

	// Adding counter-like histogram.
	if h.CounterResetHint != histogram.GaugeType {
-		pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
+		pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
		if !okToAppend || counterReset {
			if appendOnly {
				if counterReset {
@@ -646,6 +777,23 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend
			happ.appendFloatHistogram(t, h)
			return newChunk, false, app, nil
		}
+		if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
+			// The histogram needs to be expanded to have the extra empty buckets
+			// of the chunk.
+			if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
+				// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
+				// However we need to make a copy in case the input is sharing spans from an iterator.
+				h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
+				copy(h.PositiveSpans, a.pSpans)
+				h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
+				copy(h.NegativeSpans, a.nSpans)
+			} else {
+				// Spans need pre-adjusting to accommodate the new buckets.
+				h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
+				h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
+			}
+			a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+		}
		if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
			if appendOnly {
				return nil, false, a, fmt.Errorf("float histogram layout change with %d positive and %d negative forward inserts", len(pForwardInserts), len(nForwardInserts))
@@ -826,6 +974,7 @@ func (it *floatHistogramIterator) Reset(b []byte) {
	if it.atFloatHistogramCalled {
		it.atFloatHistogramCalled = false
		it.pBuckets, it.nBuckets = nil, nil
+		it.pSpans, it.nSpans = nil, nil
	} else {
		it.pBuckets, it.nBuckets = it.pBuckets[:0], it.nBuckets[:0]
	}
@@ -921,7 +1070,7 @@ func (it *floatHistogramIterator) Next() ValueType {
	// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
	// so we don't need a separate single delta logic for the 2nd sample.

-	// Recycle bucket slices that have not been returned yet. Otherwise, copy them.
+	// Recycle bucket and span slices that have not been returned yet. Otherwise, copy them.
	// We can always recycle the slices for leading and trailing bits as they are
	// never returned to the caller.
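+	// If AtFloatHistogram has handed out the current slices, allocate fresh
+	// copies below so that decoding the next sample cannot mutate a histogram
+	// already returned to the caller.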
if it.atFloatHistogramCalled { @@ -940,6 +1089,20 @@ func (it *floatHistogramIterator) Next() ValueType { } else { it.nBuckets = nil } + if len(it.pSpans) > 0 { + newSpans := make([]histogram.Span, len(it.pSpans)) + copy(newSpans, it.pSpans) + it.pSpans = newSpans + } else { + it.pSpans = nil + } + if len(it.nSpans) > 0 { + newSpans := make([]histogram.Span, len(it.nSpans)) + copy(newSpans, it.nSpans) + it.nSpans = newSpans + } else { + it.nSpans = nil + } } tDod, err := readVarbitInt(&it.br) diff --git a/tsdb/chunkenc/float_histogram_test.go b/tsdb/chunkenc/float_histogram_test.go index 2ee4422b9..6092c0f63 100644 --- a/tsdb/chunkenc/float_histogram_test.go +++ b/tsdb/chunkenc/float_histogram_test.go @@ -245,9 +245,11 @@ func TestFloatHistogramChunkBucketChanges(t *testing.T) { h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3) // This is how span changes will be handled. hApp, _ := app.(*FloatHistogramAppender) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2.ToFloat(nil)) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2.ToFloat(nil)) require.NotEmpty(t, posInterjections) require.NotEmpty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.True(t, ok) // Only new buckets came in. require.False(t, cr) c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans) @@ -333,7 +335,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Schema++ - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -343,7 +345,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.ZeroThreshold += 0.1 - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -363,9 +365,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.Sum = 30 h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.NotEmpty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.True(t, ok) // Only new buckets came in. require.False(t, cr) @@ -385,24 +389,94 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.Sum = 21 h2.PositiveBuckets = []float64{6, 3, 2, 4, 5, 1} - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) } + { // New histogram that has buckets missing but the buckets missing were empty. 
+ emptyBucketH := eh.Copy() + emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1} + c, hApp, ts, h1 := setup(emptyBucketH) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 5, Length: 1}, + } + savedH2Spans := h2.PositiveSpans + h2.PositiveBuckets = []float64{7, 4, 3, 5, 2} + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + + // Check that h2 was recoded. + require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2}, h2.PositiveBuckets) + require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") + } + + { // New histogram that has new buckets AND buckets missing but the buckets missing were empty. + emptyBucketH := eh.Copy() + emptyBucketH.PositiveBuckets = []float64{6, 0, 3, 2, 4, 0, 1} + c, hApp, ts, h1 := setup(emptyBucketH) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 3, Length: 2}, + {Offset: 5, Length: 2}, + } + savedH2Spans := h2.PositiveSpans + h2.PositiveBuckets = []float64{7, 4, 3, 5, 2, 3} + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertRecodedFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + + // Check that h2 was recoded. + require.Equal(t, []float64{7, 0, 4, 3, 5, 0, 2, 3}, h2.PositiveBuckets) + require.Equal(t, []histogram.Span{ + {Offset: 0, Length: 2}, // Added empty bucket. + {Offset: 2, Length: 1}, // Existing - offset adjusted. + {Offset: 3, Length: 2}, // Existing. + {Offset: 3, Length: 1}, // Added empty bucket. + {Offset: 1, Length: 2}, // Existing + the extra bucket. + }, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") + } + { // New histogram that has a counter reset while buckets are same. c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Sum = 23 h2.PositiveBuckets = []float64{6, 2, 3, 2, 4, 5, 1} - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) @@ -421,9 +495,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.Sum = 29 h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 0} - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -448,9 +524,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2.Sum = 26 h2.PositiveBuckets = []float64{1, 2, 5, 3, 3, 2, 4, 5, 1} - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -524,10 +602,44 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) } + { + // Start a new chunk with a histogram that has an empty bucket. + // Add a histogram that has the same bucket missing. + // This should be appendable and can happen if we are merging from chunks + // where the first sample came from a recoded chunk that added the + // empty bucket. + h1 := eh.Copy() + // Add a bucket that is empty -10 offsets from the first bucket. + h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1) + h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1} + h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length} + for i, v := range eh.PositiveSpans[1:] { + h1.PositiveSpans[i+2] = v + } + h1.PositiveBuckets = make([]float64, len(eh.PositiveBuckets)+1) + h1.PositiveBuckets[0] = 0 + for i, v := range eh.PositiveBuckets { + h1.PositiveBuckets[i+1] = v + } + + c, hApp, ts, _ := setup(h1) + h2 := eh.Copy() + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } + { // Custom buckets, no change. 
c, hApp, ts, h1 := setup(cbh) h2 := h1.Copy() - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.True(t, ok) assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -538,7 +650,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2 := h1.Copy() h2.Count++ h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 2} - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.True(t, ok) assertNoNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -549,7 +661,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { h2 := h1.Copy() h2.Count-- h2.PositiveBuckets = []float64{6, 3, 3, 2, 4, 5, 0} - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) @@ -559,7 +671,7 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { c, hApp, ts, h1 := setup(cbh) h2 := h1.Copy() h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewFloatHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) @@ -581,9 +693,11 @@ func TestFloatHistogramChunkAppendable(t *testing.T) { // so the new histogram should have new counts >= these per-bucket counts, e.g.: h2.PositiveBuckets = []float64{7, 5, 1, 3, 1, 0, 2, 5, 5, 0, 1} // (total 30) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.NotEmpty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.True(t, ok) // Only new buckets came in. require.False(t, cr) @@ -839,9 +953,11 @@ func TestFloatHistogramChunkAppendableWithEmptySpan(t *testing.T) { require.Equal(t, 1, c.NumSamples()) hApp, _ := app.(*FloatHistogramAppender) - pI, nI, okToAppend, counterReset := hApp.appendable(tc.h2) + pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2) require.Empty(t, pI) require.Empty(t, nI) + require.Empty(t, bpI) + require.Empty(t, bnI) require.True(t, okToAppend) require.False(t, counterReset) }) @@ -1190,3 +1306,54 @@ func TestFloatHistogramAppendOnlyErrors(t *testing.T) { require.EqualError(t, err, "float histogram counter reset") }) } + +func TestFloatHistogramUniqueSpansAfterNext(t *testing.T) { + // Create two histograms with the same schema and spans. + h1 := &histogram.FloatHistogram{ + Schema: 1, + ZeroThreshold: 1e-100, + Count: 10, + ZeroCount: 2, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []float64{1, 2, 3, 4}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []float64{2}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewFloatHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendFloatHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendFloatHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. 
+	it := c.Iterator(nil)
+	require.Equal(t, ValFloatHistogram, it.Next())
+	_, rh1 := it.AtFloatHistogram(nil)
+
+	// Advance to the second histogram and retrieve it.
+	require.Equal(t, ValFloatHistogram, it.Next())
+	_, rh2 := it.AtFloatHistogram(nil)
+
+	require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected")
+	require.Equal(t, rh1.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected")
+	require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected")
+	require.Equal(t, rh2.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected")
+
+	// Check that the spans for h1 and h2 are unique slices.
+	require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms")
+	require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms")
+}
diff --git a/tsdb/chunkenc/histogram.go b/tsdb/chunkenc/histogram.go
index aa74badd1..f8796d64e 100644
--- a/tsdb/chunkenc/histogram.go
+++ b/tsdb/chunkenc/histogram.go
@@ -237,16 +237,23 @@ func (a *HistogramAppender) Append(int64, float64) {
}

// appendable returns whether the chunk can be appended to, and if so whether
-// any recoding needs to happen using the provided inserts (in case of any new
-// buckets, positive or negative range, respectively). If the sample is a gauge
-// histogram, AppendableGauge must be used instead.
+// 1. Any recoding needs to happen to the chunk using the provided forward
+// inserts (in case of any new buckets, positive or negative range,
+// respectively).
+// 2. Any recoding needs to happen for the histogram being appended, using the
+// backward inserts (in case of any missing buckets, positive or negative
+// range, respectively).
+//
+// If the sample is a gauge histogram, AppendableGauge must be used instead.
//
// The chunk is not appendable in the following cases:
//
// - The schema has changed.
// - The custom bounds have changed if the current schema is custom buckets.
// - The threshold for the zero bucket has changed.
-// - Any buckets have disappeared.
+// - Any buckets have disappeared, unless the bucket count was 0 (unused).
+//   Empty buckets can happen if the chunk was recoded and we're merging a
+//   non-recoded histogram. In this case backward inserts will be provided.
// - There was a counter reset in the count of observations or in any bucket,
//   including the zero bucket.
// - The last sample in the chunk was stale while the current sample is not stale.
@@ -256,6 +263,7 @@ func (a *HistogramAppender) Append(int64, float64) {
// append. If counterReset is true, okToAppend is always false.
func (a *HistogramAppender) appendable(h *histogram.Histogram) (
	positiveInserts, negativeInserts []Insert,
+	backwardPositiveInserts, backwardNegativeInserts []Insert,
	okToAppend, counterReset bool,
) {
	if a.NumSamples() > 0 && a.GetCounterResetHeader() == GaugeType {
@@ -299,31 +307,223 @@ func (a *HistogramAppender) appendable(h *histogram.Histogram) (
	}

	var ok bool
-	positiveInserts, ok = expandSpansForward(a.pSpans, h.PositiveSpans)
+	positiveInserts, backwardPositiveInserts, ok = expandIntSpansAndBuckets(a.pSpans, h.PositiveSpans, a.pBuckets, h.PositiveBuckets)
	if !ok {
		counterReset = true
		return
	}
-	negativeInserts, ok = expandSpansForward(a.nSpans, h.NegativeSpans)
+	negativeInserts, backwardNegativeInserts, ok = expandIntSpansAndBuckets(a.nSpans, h.NegativeSpans, a.nBuckets, h.NegativeBuckets)
	if !ok {
		counterReset = true
		return
	}

-	if counterResetInAnyBucket(a.pBuckets, h.PositiveBuckets, a.pSpans, h.PositiveSpans) ||
-		counterResetInAnyBucket(a.nBuckets, h.NegativeBuckets, a.nSpans, h.NegativeSpans) {
-		counterReset, positiveInserts, negativeInserts = true, nil, nil
-		return
-	}
-
	okToAppend = true
	return
}

+// expandIntSpansAndBuckets returns the inserts to expand the bucket spans 'a' so that
+// they match the spans in 'b'. 'b' must cover the same or more buckets than
+// 'a', otherwise the function will return false.
+// The function also returns the inserts to expand 'b' to also cover all the
+// buckets that are missing in 'b', but are present with 0 counter value in 'a'.
+// It also checks for counter resets between 'a' and 'b'.
+//
+// Example:
+//
+// Let's say the old buckets look like this:
+//
+// span syntax: [offset, length]
+// spans      : [ 0 , 2 ]               [2,1]                [ 3 , 2 ]                     [3,1]       [1,1]
+// bucket idx : [0]   [1]    2     3    [4]    5     6     7   [8]    [9]   10    11    12   [13]   14   [15]
+// raw values    6     3                 3                      2      4                      5           1
+// deltas        6    -3                 0                     -1      2                      1          -4
+//
+// But now we introduce a new bucket layout. (Carefully chosen example where we
+// have a span appended, one unchanged[*], one prepended, and two merged - in
+// that order.)
+//
+// [*] Unchanged in terms of which bucket indices they represent, but to achieve
+// that, their offset needs to change if "disrupted" by spans changing ahead of
+// them.
+//
+//                                      \/ this one is "unchanged"
+// spans      : [  0  ,  3    ]         [1,1]       [ 1 , 4 ]               [ 3 ,  3  ]
+// bucket idx : [0]   [1]   [2]    3    [4]    5    [6]   [7]   [8]   [9]   10    11    12   [13]   [14]   [15]
+// raw values    6     3     0           3           0     0     2     4                      5      0      1
+// deltas        6    -3    -3           3          -3     0     2     2                      1     -5      1
+// delta mods:                          / \         / \                                      / \
+//
+// Note for histograms with delta-encoded buckets: Whenever any new buckets are
+// introduced, the subsequent "old" bucket needs to readjust its delta to the
+// new base of 0. Thus, for the caller who wants to transform the set of
+// original deltas to a new set of deltas to match a new span layout that adds
+// buckets, we simply need to generate a list of inserts.
+//
+// Note: Within expandIntSpansAndBuckets we don't have to worry about the
+// changes to the spans themselves, thanks to the iterators we get to work with
+// the more useful bucket indices (which of course directly correspond to the
+// buckets we have to adjust).
+func expandIntSpansAndBuckets(a, b []histogram.Span, aBuckets, bBuckets []int64) (forward, backward []Insert, ok bool) {
+	ai := newBucketIterator(a)
+	bi := newBucketIterator(b)
+
+	var aInserts []Insert // To insert into buckets of a, to make up for missing buckets in b.
+	var bInserts []Insert // To insert into buckets of b, to make up for missing empty(!) buckets in a.
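+	// Note: unlike in the float chunk, aBuckets and bBuckets hold deltas, so
+	// the absolute counts aCount and bCount are accumulated with += as the
+	// iterators advance (the first delta is the first absolute count).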
+ + // When aInter.num or bInter.num becomes > 0, this becomes a valid insert that should + // be yielded when we finish a streak of new buckets. + var aInter Insert + var bInter Insert + + aIdx, aOK := ai.Next() + bIdx, bOK := bi.Next() + + // Bucket count. Initialize the absolute count and index into the + // positive/negative counts or deltas array. The bucket count is + // used to detect counter reset as well as unused buckets in a. + var ( + aCount int64 + bCount int64 + aCountIdx int + bCountIdx int + ) + if aOK { + aCount = aBuckets[aCountIdx] + } + if bOK { + bCount = bBuckets[bCountIdx] + } + +loop: + for { + switch { + case aOK && bOK: + switch { + case aIdx == bIdx: // Both have an identical bucket index. + // Bucket count. Check bucket for reset from a to b. + if aCount > bCount { + return nil, nil, false + } + + // Finish WIP insert for a and reset. + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + + // Finish WIP insert for b and reset. + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + + aIdx, aOK = ai.Next() + bIdx, bOK = bi.Next() + aInter.pos++ // Advance potential insert position. + aCountIdx++ // Advance absolute bucket count index for a. + if aOK { + aCount += aBuckets[aCountIdx] + } + bInter.pos++ // Advance potential insert position. + bCountIdx++ // Advance absolute bucket count index for b. + if bOK { + bCount += bBuckets[bCountIdx] + } + + continue + case aIdx < bIdx: // b misses a bucket index that is in a. + // This is ok if the count in a is 0, in which case we make a note to + // fill in the bucket in b and advance a. + if aCount == 0 { + bInter.num++ // Mark that we need to insert a bucket in b. + bInter.bucketIdx = aIdx + // Advance a + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ + aCountIdx++ + if aOK { + aCount += aBuckets[aCountIdx] + } + continue + } + // Otherwise we are missing a bucket that was in use in a, which is a reset. + return nil, nil, false + case aIdx > bIdx: // a misses a value that is in b. Forward b and recompare. + aInter.num++ + aInter.bucketIdx = bIdx + // Advance b + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ + bCountIdx++ + if bOK { + bCount += bBuckets[bCountIdx] + } + } + case aOK && !bOK: // b misses a value that is in a. + // This is ok if the count in a is 0, in which case we make a note to + // fill in the bucket in b and advance a. + if aCount == 0 { + bInter.num++ + bInter.bucketIdx = aIdx + // Advance a + if aInter.num > 0 { + aInserts = append(aInserts, aInter) + aInter.num = 0 + } + aIdx, aOK = ai.Next() + aInter.pos++ // Advance potential insert position. + // Update absolute bucket counts for a. + aCountIdx++ + if aOK { + aCount += aBuckets[aCountIdx] + } + continue + } + // Otherwise we are missing a bucket that was in use in a, which is a reset. + return nil, nil, false + case !aOK && bOK: // a misses a value that is in b. Forward b and recompare. + aInter.num++ + aInter.bucketIdx = bIdx + // Advance b + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + bInter.num = 0 + } + bIdx, bOK = bi.Next() + bInter.pos++ // Advance potential insert position. + // Update absolute bucket counts for b. + bCountIdx++ + if bOK { + bCount += bBuckets[bCountIdx] + } + default: // Both iterators ran out. We're done. 
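+			// As in the float variant, flush any work-in-progress inserts
+			// before leaving the loop.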
+ if aInter.num > 0 { + aInserts = append(aInserts, aInter) + } + if bInter.num > 0 { + bInserts = append(bInserts, bInter) + } + break loop + } + } + + return aInserts, bInserts, true +} + // appendableGauge returns whether the chunk can be appended to, and if so // whether: -// 1. Any recoding needs to happen to the chunk using the provided inserts -// (in case of any new buckets, positive or negative range, respectively). +// 1. Any recoding needs to happen to the chunk using the provided forward +// inserts (in case of any new buckets, positive or negative range, +// respectively). // 2. Any recoding needs to happen for the histogram being appended, using the // backward inserts (in case of any missing buckets, positive or negative // range, respectively). @@ -369,76 +569,6 @@ func (a *HistogramAppender) appendableGauge(h *histogram.Histogram) ( return } -// counterResetInAnyBucket returns true if there was a counter reset for any -// bucket. This should be called only when the bucket layout is the same or new -// buckets were added. It does not handle the case of buckets missing. -func counterResetInAnyBucket(oldBuckets, newBuckets []int64, oldSpans, newSpans []histogram.Span) bool { - if len(oldSpans) == 0 || len(oldBuckets) == 0 { - return false - } - - var ( - oldSpanSliceIdx, newSpanSliceIdx int = -1, -1 // Index for the span slices. Starts at -1 to indicate that the first non empty span is not yet found. - oldInsideSpanIdx, newInsideSpanIdx uint32 // Index inside a span. - oldIdx, newIdx int32 // Index inside a bucket slice. - oldBucketSliceIdx, newBucketSliceIdx int // Index inside bucket slice. - ) - - // Find first non empty spans. - oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans) - newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans) - oldVal, newVal := oldBuckets[0], newBuckets[0] - - // Since we assume that new spans won't have missing buckets, there will never be a case - // where the old index will not find a matching new index. - for { - if oldIdx == newIdx { - if newVal < oldVal { - return true - } - } - - if oldIdx <= newIdx { - // Moving ahead old bucket and span by 1 index. - if oldInsideSpanIdx+1 >= oldSpans[oldSpanSliceIdx].Length { - // Current span is over. - oldSpanSliceIdx, oldIdx = nextNonEmptySpanSliceIdx(oldSpanSliceIdx, oldIdx, oldSpans) - oldInsideSpanIdx = 0 - if oldSpanSliceIdx >= len(oldSpans) { - // All old spans are over. - break - } - } else { - oldInsideSpanIdx++ - oldIdx++ - } - oldBucketSliceIdx++ - oldVal += oldBuckets[oldBucketSliceIdx] - } - - if oldIdx > newIdx { - // Moving ahead new bucket and span by 1 index. - if newInsideSpanIdx+1 >= newSpans[newSpanSliceIdx].Length { - // Current span is over. - newSpanSliceIdx, newIdx = nextNonEmptySpanSliceIdx(newSpanSliceIdx, newIdx, newSpans) - newInsideSpanIdx = 0 - if newSpanSliceIdx >= len(newSpans) { - // All new spans are over. - // This should not happen, old spans above should catch this first. - panic("new spans over before old spans in counterReset") - } - } else { - newInsideSpanIdx++ - newIdx++ - } - newBucketSliceIdx++ - newVal += newBuckets[newBucketSliceIdx] - } - } - - return false -} - // appendHistogram appends a histogram to the chunk. The caller must ensure that // the histogram is properly structured, e.g. the number of buckets used // corresponds to the number conveyed by the span structures. 
First call
@@ -649,7 +779,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
		a.setCounterResetHeader(CounterReset)
	case prev != nil:
		// This is a new chunk, but continued from a previous one. We need to calculate the reset header unless already set.
-		_, _, _, counterReset := prev.appendable(h)
+		_, _, _, _, _, counterReset := prev.appendable(h)
		if counterReset {
			a.setCounterResetHeader(CounterReset)
		} else {
@@ -661,7 +791,7 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h

	// Adding counter-like histogram.
	if h.CounterResetHint != histogram.GaugeType {
-		pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h)
+		pForwardInserts, nForwardInserts, pBackwardInserts, nBackwardInserts, okToAppend, counterReset := a.appendable(h)
		if !okToAppend || counterReset {
			if appendOnly {
				if counterReset {
@@ -681,6 +811,23 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h
			happ.appendHistogram(t, h)
			return newChunk, false, app, nil
		}
+		if len(pBackwardInserts) > 0 || len(nBackwardInserts) > 0 {
+			// The histogram needs to be expanded to have the extra empty buckets
+			// of the chunk.
+			if len(pForwardInserts) == 0 && len(nForwardInserts) == 0 {
+				// No new buckets from the histogram, so the spans of the appender can accommodate the new buckets.
+				// However we need to make a copy in case the input is sharing spans from an iterator.
+				h.PositiveSpans = make([]histogram.Span, len(a.pSpans))
+				copy(h.PositiveSpans, a.pSpans)
+				h.NegativeSpans = make([]histogram.Span, len(a.nSpans))
+				copy(h.NegativeSpans, a.nSpans)
+			} else {
+				// Spans need pre-adjusting to accommodate the new buckets.
+				h.PositiveSpans = adjustForInserts(h.PositiveSpans, pBackwardInserts)
+				h.NegativeSpans = adjustForInserts(h.NegativeSpans, nBackwardInserts)
+			}
+			a.recodeHistogram(h, pBackwardInserts, nBackwardInserts)
+		}
		if len(pForwardInserts) > 0 || len(nForwardInserts) > 0 {
			if appendOnly {
				return nil, false, a, fmt.Errorf("histogram layout change with %d positive and %d negative forward inserts", len(pForwardInserts), len(nForwardInserts))
@@ -926,6 +1073,7 @@ func (it *histogramIterator) Reset(b []byte) {
	if it.atHistogramCalled {
		it.atHistogramCalled = false
		it.pBuckets, it.nBuckets = nil, nil
+		it.pSpans, it.nSpans = nil, nil
	} else {
		it.pBuckets = it.pBuckets[:0]
		it.nBuckets = it.nBuckets[:0]
@@ -1038,8 +1186,25 @@ func (it *histogramIterator) Next() ValueType {
	// The case for the 2nd sample with single deltas is implicitly handled correctly with the double delta code,
	// so we don't need a separate single delta logic for the 2nd sample.

-	// Recycle bucket slices that have not been returned yet. Otherwise,
+	// Recycle bucket and span slices that have not been returned yet. Otherwise,
	// copy them.
+	if it.atFloatHistogramCalled || it.atHistogramCalled {
+		if len(it.pSpans) > 0 {
+			newSpans := make([]histogram.Span, len(it.pSpans))
+			copy(newSpans, it.pSpans)
+			it.pSpans = newSpans
+		} else {
+			it.pSpans = nil
+		}
+		if len(it.nSpans) > 0 {
+			newSpans := make([]histogram.Span, len(it.nSpans))
+			copy(newSpans, it.nSpans)
+			it.nSpans = newSpans
+		} else {
+			it.nSpans = nil
+		}
+	}
+
	if it.atHistogramCalled {
		it.atHistogramCalled = false
		if len(it.pBuckets) > 0 {
@@ -1057,6 +1222,7 @@ func (it *histogramIterator) Next() ValueType {
			it.nBuckets = nil
		}
	}
+	// FloatBuckets are set from scratch, so simply create empty ones.
	if it.atFloatHistogramCalled {
		it.atFloatHistogramCalled = false
diff --git a/tsdb/chunkenc/histogram_meta.go b/tsdb/chunkenc/histogram_meta.go
index c5381ba2f..8d614b817 100644
--- a/tsdb/chunkenc/histogram_meta.go
+++ b/tsdb/chunkenc/histogram_meta.go
@@ -278,8 +278,15 @@ func (b *bucketIterator) Next() (int, bool) {
type Insert struct {
	pos int
	num int
+
+	// Optional: bucketIdx is the index of the bucket that is inserted.
+	// Can be used to adjust spans.
+	bucketIdx int
}

+// Deprecated: expandSpansForward is kept only for reference; use
+// expandIntSpansAndBuckets or expandFloatSpansAndBuckets instead.
+//
// expandSpansForward returns the inserts to expand the bucket spans 'a' so that
// they match the spans in 'b'. 'b' must cover the same or more buckets than
// 'a', otherwise the function will return false.
@@ -575,14 +582,64 @@ func counterResetHint(crh CounterResetHeader, numRead uint16) histogram.CounterR
	}
}

-// Handle pathological case of empty span when advancing span idx.
-// Call it with idx==-1 to find the first non empty span.
-func nextNonEmptySpanSliceIdx(idx int, bucketIdx int32, spans []histogram.Span) (newIdx int, newBucketIdx int32) {
-	for idx++; idx < len(spans); idx++ {
-		if spans[idx].Length > 0 {
-			return idx, bucketIdx + spans[idx].Offset + 1
-		}
-		bucketIdx += spans[idx].Offset
+// adjustForInserts adjusts the given span layout to include the buckets
+// described by the given inserts, returning the merged spans.
+func adjustForInserts(spans []histogram.Span, inserts []Insert) (mergedSpans []histogram.Span) {
+	if len(inserts) == 0 {
+		return spans
	}
-	return idx, 0
+
+	it := newBucketIterator(spans)
+
+	var (
+		lastBucket int
+		i          int
+		insertIdx  = inserts[i].bucketIdx
+		insertNum  = inserts[i].num
+	)
+
+	addBucket := func(b int) {
+		offset := b - lastBucket - 1
+		if offset == 0 && len(mergedSpans) > 0 {
+			mergedSpans[len(mergedSpans)-1].Length++
+		} else {
+			if len(mergedSpans) == 0 {
+				offset++
+			}
+			mergedSpans = append(mergedSpans, histogram.Span{
+				Offset: int32(offset),
+				Length: 1,
+			})
+		}
+
+		lastBucket = b
+	}
+	consumeInsert := func() {
+		// Consume the insert.
+		insertNum--
+		if insertNum == 0 {
+			i++
+			if i < len(inserts) {
+				insertIdx = inserts[i].bucketIdx
+				insertNum = inserts[i].num
+			}
+		} else {
+			insertIdx++
+		}
+	}
+
+	bucket, ok := it.Next()
+	for ok {
+		if i < len(inserts) && insertIdx < bucket {
+			addBucket(insertIdx)
+			consumeInsert()
+		} else {
+			addBucket(bucket)
+			bucket, ok = it.Next()
+		}
+	}
+	for i < len(inserts) {
+		addBucket(inserts[i].bucketIdx)
+		consumeInsert()
+	}
+	return
}
diff --git a/tsdb/chunkenc/histogram_test.go b/tsdb/chunkenc/histogram_test.go
index d029aaefc..29b77b158 100644
--- a/tsdb/chunkenc/histogram_test.go
+++ b/tsdb/chunkenc/histogram_test.go
@@ -256,9 +256,11 @@ func TestHistogramChunkBucketChanges(t *testing.T) {
	h2.NegativeBuckets = []int64{2, -1} // 2 1 (total 3)
	// This is how span changes will be handled.
	hApp, _ := app.(*HistogramAppender)
-	posInterjections, negInterjections, ok, cr := hApp.appendable(h2)
+	posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2)
	require.NotEmpty(t, posInterjections)
	require.NotEmpty(t, negInterjections)
+	require.Empty(t, backwardPositiveInserts)
+	require.Empty(t, backwardNegativeInserts)
	require.True(t, ok) // Only new buckets came in.
require.False(t, cr) c, app = hApp.recode(posInterjections, negInterjections, h2.PositiveSpans, h2.NegativeSpans) @@ -347,7 +349,7 @@ func TestHistogramChunkAppendable(t *testing.T) { c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Schema++ - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -357,7 +359,7 @@ func TestHistogramChunkAppendable(t *testing.T) { c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.ZeroThreshold += 0.1 - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -380,9 +382,11 @@ func TestHistogramChunkAppendable(t *testing.T) { // so the new histogram should have new counts >= these per-bucket counts, e.g.: h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.NotEmpty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.True(t, ok) // Only new buckets came in. require.False(t, cr) @@ -401,24 +405,96 @@ func TestHistogramChunkAppendable(t *testing.T) { h2.Sum = 21 h2.PositiveBuckets = []int64{6, -3, -1, 2, 1, -4} // counts: 6, 3, 2, 4, 5, 1 (total 21) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) } + { // New histogram that has buckets missing but the buckets missing were empty. + emptyBucketH := eh.Copy() + emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12) + c, hApp, ts, h1 := setup(emptyBucketH) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9. + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 4, Length: 1}, + {Offset: 1, Length: 1}, + } + savedH2Spans := h2.PositiveSpans + h2.PositiveBuckets = []int64{7, -5, 1, 0, 1} // counts: 7, 2, 3, 3, 4 (total 18) + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + + // Check that h2 was recoded. + require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 4 (total 18) + require.Equal(t, emptyBucketH.PositiveSpans, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") + } + + { // New histogram that has new buckets AND buckets missing but the buckets missing were empty. 
+ emptyBucketH := eh.Copy() + emptyBucketH.PositiveBuckets = []int64{6, -6, 1, 1, -2, 1, 1} // counts: 6, 0, 1, 2, 0, 1, 2 (total 12) + c, hApp, ts, h1 := setup(emptyBucketH) + h2 := h1.Copy() + h2.PositiveSpans = []histogram.Span{ // Missing buckets at offset 1 and 9. + {Offset: 0, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 3, Length: 1}, + {Offset: 4, Length: 1}, + {Offset: 1, Length: 2}, + } + savedH2Spans := h2.PositiveSpans + h2.PositiveBuckets = []int64{7, -5, 1, 0, 1, 1} // counts: 7, 2, 3, 3, 4, 5 (total 23) + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.NotEmpty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertRecodedHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + + // Check that h2 was recoded. + require.Equal(t, []int64{7, -7, 2, 1, -3, 3, 1, 1}, h2.PositiveBuckets) // counts: 7, 0, 2, 3 , 0, 3, 5 (total 23) + require.Equal(t, []histogram.Span{ + {Offset: 0, Length: 2}, // Added empty bucket. + {Offset: 2, Length: 1}, // Existing - offset adjusted. + {Offset: 3, Length: 2}, // Added empty bucket. + {Offset: 3, Length: 1}, // Existing - offset adjusted. + {Offset: 1, Length: 2}, // Existing. + }, h2.PositiveSpans) + require.NotEqual(t, savedH2Spans, h2.PositiveSpans, "recoding must make a copy") + } + { // New histogram that has a counter reset while buckets are same. c, hApp, ts, h1 := setup(eh) h2 := h1.Copy() h2.Sum = 23 h2.PositiveBuckets = []int64{6, -4, 1, -1, 2, 1, -4} // counts: 6, 2, 3, 2, 4, 5, 1 (total 23) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -440,9 +516,11 @@ func TestHistogramChunkAppendable(t *testing.T) { // so the new histogram should have new counts >= these per-bucket counts, e.g.: h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 0} // 7 5 1 3 1 0 2 5 5 0 0 (total 29) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. require.True(t, cr) @@ -470,9 +548,11 @@ func TestHistogramChunkAppendable(t *testing.T) { // so the new histogram should have new counts >= these per-bucket counts, e.g.: h2.PositiveBuckets = []int64{1, 1, 3, -2, 0, -1, 2, 1, -4} // counts: 1, 2, 5, 3, 3, 2, 4, 5, 1 (total 26) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.Empty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.False(t, ok) // Need to cut a new chunk. 
require.True(t, cr) @@ -549,10 +629,44 @@ func TestHistogramChunkAppendable(t *testing.T) { require.Equal(t, NotCounterReset, nextChunk.GetCounterResetHeader()) } + { + // Start a new chunk with a histogram that has an empty bucket. + // Add a histogram that has the same bucket missing. + // This should be appendable and can happen if we are merging from chunks + // where the first sample came from a recoded chunk that added the + // empty bucket. + h1 := eh.Copy() + // Add a bucket that is empty -10 offsets from the first bucket. + h1.PositiveSpans = make([]histogram.Span, len(eh.PositiveSpans)+1) + h1.PositiveSpans[0] = histogram.Span{Offset: eh.PositiveSpans[0].Offset - 10, Length: 1} + h1.PositiveSpans[1] = histogram.Span{Offset: eh.PositiveSpans[0].Offset + 9, Length: eh.PositiveSpans[0].Length} + for i, v := range eh.PositiveSpans[1:] { + h1.PositiveSpans[i+2] = v + } + h1.PositiveBuckets = make([]int64, len(eh.PositiveBuckets)+1) + h1.PositiveBuckets[0] = 0 + for i, v := range eh.PositiveBuckets { + h1.PositiveBuckets[i+1] = v + } + + c, hApp, ts, _ := setup(h1) + h2 := eh.Copy() + + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) + require.Empty(t, posInterjections) + require.Empty(t, negInterjections) + require.NotEmpty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) + require.True(t, ok) + require.False(t, cr) + + assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) + } + { // Custom buckets, no change. c, hApp, ts, h1 := setup(cbh) h2 := h1.Copy() - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.True(t, ok) assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -563,7 +677,7 @@ func TestHistogramChunkAppendable(t *testing.T) { h2 := h1.Copy() h2.Count++ h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -3} - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.True(t, ok) assertNoNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, UnknownCounterReset) @@ -574,7 +688,7 @@ func TestHistogramChunkAppendable(t *testing.T) { h2 := h1.Copy() h2.Count-- h2.PositiveBuckets = []int64{6, -3, 0, -1, 2, 1, -5} - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) @@ -584,7 +698,7 @@ func TestHistogramChunkAppendable(t *testing.T) { c, hApp, ts, h1 := setup(cbh) h2 := h1.Copy() h2.CustomValues = []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} - _, _, ok, _ := hApp.appendable(h2) + _, _, _, _, ok, _ := hApp.appendable(h2) require.False(t, ok) assertNewHistogramChunkOnAppend(t, c, hApp, ts+1, h2, CounterReset) @@ -606,9 +720,11 @@ func TestHistogramChunkAppendable(t *testing.T) { // so the new histogram should have new counts >= these per-bucket counts, e.g.: h2.PositiveBuckets = []int64{7, -2, -4, 2, -2, -1, 2, 3, 0, -5, 1} // 7 5 1 3 1 0 2 5 5 0 1 (total 30) - posInterjections, negInterjections, ok, cr := hApp.appendable(h2) + posInterjections, negInterjections, backwardPositiveInserts, backwardNegativeInserts, ok, cr := hApp.appendable(h2) require.NotEmpty(t, posInterjections) require.Empty(t, negInterjections) + require.Empty(t, backwardPositiveInserts) + require.Empty(t, backwardNegativeInserts) require.True(t, ok) // Only new buckets came in. 
require.False(t, cr) @@ -875,9 +991,11 @@ func TestHistogramChunkAppendableWithEmptySpan(t *testing.T) { require.Equal(t, 1, c.NumSamples()) hApp, _ := app.(*HistogramAppender) - pI, nI, okToAppend, counterReset := hApp.appendable(tc.h2) + pI, nI, bpI, bnI, okToAppend, counterReset := hApp.appendable(tc.h2) require.Empty(t, pI) require.Empty(t, nI) + require.Empty(t, bpI) + require.Empty(t, bnI) require.True(t, okToAppend) require.False(t, counterReset) }) @@ -1368,3 +1486,152 @@ func TestHistogramAppendOnlyErrors(t *testing.T) { require.EqualError(t, err, "histogram counter reset") }) } + +func TestHistogramUniqueSpansAfterNext(t *testing.T) { + // Create two histograms with the same schema and spans. + h1 := &histogram.Histogram{ + Schema: 1, + ZeroThreshold: 1e-100, + Count: 10, + ZeroCount: 2, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, 3, 4}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{2}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, rh1 := it.AtHistogram(nil) + + // Advance to the second histogram and retrieve it. + require.Equal(t, ValHistogram, it.Next()) + _, rh2 := it.AtHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + + // Check that the spans for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") +} + +func TestHistogramUniqueSpansAfterNextWithAtFloatHistogram(t *testing.T) { + // Create two histograms with the same schema and spans. + h1 := &histogram.Histogram{ + Schema: 1, + ZeroThreshold: 1e-100, + Count: 10, + ZeroCount: 2, + Sum: 15.0, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: 2}, + {Offset: 1, Length: 2}, + }, + PositiveBuckets: []int64{1, 2, 3, 4}, + NegativeSpans: []histogram.Span{ + {Offset: 1, Length: 1}, + }, + NegativeBuckets: []int64{2}, + } + + h2 := h1.Copy() + + // Create a chunk and append both histograms. + c := NewHistogramChunk() + app, err := c.Appender() + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 0, h1, false) + require.NoError(t, err) + + _, _, _, err = app.AppendHistogram(nil, 1, h2, false) + require.NoError(t, err) + + // Create an iterator and advance to the first histogram. + it := c.Iterator(nil) + require.Equal(t, ValHistogram, it.Next()) + _, rh1 := it.AtFloatHistogram(nil) + + // Advance to the second histogram and retrieve it. 
+ require.Equal(t, ValHistogram, it.Next()) + _, rh2 := it.AtFloatHistogram(nil) + + require.Equal(t, rh1.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh1.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + require.Equal(t, rh2.PositiveSpans, h1.PositiveSpans, "Returned positive spans are as expected") + require.Equal(t, rh2.NegativeSpans, h1.NegativeSpans, "Returned negative spans are as expected") + + // Check that the spans for h1 and h2 are unique slices. + require.NotSame(t, &rh1.PositiveSpans[0], &rh2.PositiveSpans[0], "PositiveSpans should be unique between histograms") + require.NotSame(t, &rh1.NegativeSpans[0], &rh2.NegativeSpans[0], "NegativeSpans should be unique between histograms") +} + +func BenchmarkAppendable(b *testing.B) { + // Create a histogram with a bunch of spans and buckets. + const ( + numSpans = 1000 + spanLength = 10 + ) + h := &histogram.Histogram{ + Schema: 0, + Count: 100, + Sum: 1000, + ZeroThreshold: 0.001, + ZeroCount: 5, + } + for i := 0; i < numSpans; i++ { + h.PositiveSpans = append(h.PositiveSpans, histogram.Span{Offset: 5, Length: spanLength}) + h.NegativeSpans = append(h.NegativeSpans, histogram.Span{Offset: 5, Length: spanLength}) + for j := 0; j < spanLength; j++ { + h.PositiveBuckets = append(h.PositiveBuckets, int64(j)) + h.NegativeBuckets = append(h.NegativeBuckets, int64(j)) + } + } + + c := Chunk(NewHistogramChunk()) + + // Create fresh appender and add the first histogram. + app, err := c.Appender() + if err != nil { + b.Fatal(err) + } + + _, _, _, err = app.AppendHistogram(nil, 1, h, true) + if err != nil { + b.Fatal(err) + } + + hApp := app.(*HistogramAppender) + + isAppendable := true + for i := 0; i < b.N; i++ { + _, _, _, _, ok, _ := hApp.appendable(h) + isAppendable = isAppendable && ok + } + if !isAppendable { + b.Fail() + } +} diff --git a/tsdb/chunks/head_chunks.go b/tsdb/chunks/head_chunks.go index 6c8707c57..876b42cb2 100644 --- a/tsdb/chunks/head_chunks.go +++ b/tsdb/chunks/head_chunks.go @@ -191,7 +191,7 @@ func (f *chunkPos) bytesToWriteForChunk(chkLen uint64) uint64 { // ChunkDiskMapper is for writing the Head block chunks to disk // and access chunks via mmapped files. type ChunkDiskMapper struct { - /// Writer. + // Writer. dir *os.File writeBufferSize int @@ -210,7 +210,7 @@ type ChunkDiskMapper struct { crc32 hash.Hash writePathMtx sync.Mutex - /// Reader. + // Reader. // The int key in the map is the file number on the disk. mmappedChunkFiles map[int]*mmappedChunkFile // Contains the m-mapped files for each chunk file mapped with its index. closers map[int]io.Closer // Closers for resources behind the byte slices. 
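Editor's note: the tests above exercise the widened appendable() signature on *HistogramAppender, which now also returns backward insert lists. Below is a minimal sketch of how a caller might branch on the six return values, assuming the ordering shown in the tests; decideAppend is a hypothetical helper, not part of this patch.

func decideAppend(hApp *HistogramAppender, h *histogram.Histogram) string {
	fwdPos, fwdNeg, bwdPos, bwdNeg, ok, counterReset := hApp.appendable(h)
	switch {
	case counterReset:
		return "cut a new chunk with the CounterReset header"
	case !ok:
		return "cut a new chunk (incompatible layouts)"
	case len(fwdPos) > 0 || len(fwdNeg) > 0:
		return "recode the existing chunk to add the new buckets, then append"
	case len(bwdPos) > 0 || len(bwdNeg) > 0:
		return "recode the incoming histogram to add its missing empty buckets, then append"
	default:
		return "append directly"
	}
}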
diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go
index 4a4d89e81..b2aa39c3b 100644
--- a/tsdb/chunks/head_chunks_test.go
+++ b/tsdb/chunks/head_chunks_test.go
@@ -408,7 +408,7 @@ func TestChunkDiskMapper_Truncate_WriteQueueRaceCondition(t *testing.T) {
 	wg.Wait()
 }
 
-// TestHeadReadWriter_TruncateAfterIterateChunksError tests for
+// TestHeadReadWriter_TruncateAfterFailedIterateChunks tests for
 // https://github.com/prometheus/prometheus/issues/7753
 func TestHeadReadWriter_TruncateAfterFailedIterateChunks(t *testing.T) {
 	hrw := createChunkDiskMapper(t, "")
diff --git a/tsdb/chunks/queue_test.go b/tsdb/chunks/queue_test.go
index 9f761a5f3..3d9275eee 100644
--- a/tsdb/chunks/queue_test.go
+++ b/tsdb/chunks/queue_test.go
@@ -69,16 +69,16 @@ func TestQueuePushPopSingleGoroutine(t *testing.T) {
 	const maxSize = 500
 	const maxIters = 50
 
-	for max := 1; max < maxSize; max++ {
-		queue := newWriteJobQueue(max, 1+(r.Int()%max))
+	for maxCount := 1; maxCount < maxSize; maxCount++ {
+		queue := newWriteJobQueue(maxCount, 1+(r.Int()%maxCount))
 
 		elements := 0 // total elements in the queue
 		lastWriteID := 0
 		lastReadID := 0
 
 		for iter := 0; iter < maxIters; iter++ {
-			if elements < max {
-				toWrite := r.Int() % (max - elements)
+			if elements < maxCount {
+				toWrite := r.Int() % (maxCount - elements)
 				if toWrite == 0 {
 					toWrite = 1
 				}
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 0df6ca050..e7998abf7 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -22,6 +22,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"runtime"
 	"strconv"
 	"sync"
 	"testing"
@@ -1925,3 +1926,229 @@ func TestCompactEmptyResultBlockWithTombstone(t *testing.T) {
 	require.Nil(t, ulids)
 	require.NoError(t, block.Close())
}
+
+func TestDelayedCompaction(t *testing.T) {
+	// The delay is chosen so that it does not slow down the tests, while keeping the
+	// effective compaction duration negligible compared to it, so that the duration comparisons make sense.
+	delay := 1000 * time.Millisecond
+
+	waitUntilCompactedAndCheck := func(db *DB) {
+		t.Helper()
+		start := time.Now()
+		for db.head.compactable() {
+			// This simulates what happens at the end of commits; for a less busy DB, a compaction
+			// is otherwise triggered only every minute. This is to speed up the test.
+			select {
+			case db.compactc <- struct{}{}:
+			default:
+			}
+			time.Sleep(time.Millisecond)
+		}
+		duration := time.Since(start)
+		// Only waited for roughly one delay period: delay < duration < 2*delay.
+		require.Greater(t, duration, db.opts.CompactionDelay)
+		require.Less(t, duration, 2*db.opts.CompactionDelay)
+	}
+
+	compactAndCheck := func(db *DB) {
+		t.Helper()
+		start := time.Now()
+		db.Compact(context.Background())
+		for db.head.compactable() {
+			time.Sleep(time.Millisecond)
+		}
+		if runtime.GOOS == "windows" {
+			// TODO: enable on Windows once ms resolution timers are better supported.
+			return
+		}
+		duration := time.Since(start)
+		require.Less(t, duration, delay)
+	}
+
+	cases := []struct {
+		name string
+		// The delays are chosen so that they do not slow down the tests, while keeping the
+		// effective compaction duration negligible compared to them, so that the duration comparisons make sense.
+ compactionDelay time.Duration + }{ + { + "delayed compaction not enabled", + 0, + }, + { + "delayed compaction enabled", + delay, + }, + } + + for _, c := range cases { + c := c + t.Run(c.name, func(t *testing.T) { + t.Parallel() + + var options *Options + if c.compactionDelay > 0 { + options = &Options{CompactionDelay: c.compactionDelay} + } + db := openTestDB(t, options, []int64{10}) + defer func() { + require.NoError(t, db.Close()) + }() + + label := labels.FromStrings("foo", "bar") + + // The first compaction is expected to result in 1 block. + db.DisableCompactions() + app := db.Appender(context.Background()) + _, err := app.Append(0, label, 0, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 11, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 21, 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + if c.compactionDelay == 0 { + // When delay is not enabled, compaction should run on the first trigger. + compactAndCheck(db) + } else { + db.EnableCompactions() + waitUntilCompactedAndCheck(db) + // The db.compactc signals have been processed multiple times since a compaction is triggered every 1ms by waitUntilCompacted. + // This implies that the compaction delay doesn't block or wait on the initial trigger. + // 3 is an arbitrary value because it's difficult to determine the precise value. + require.GreaterOrEqual(t, prom_testutil.ToFloat64(db.metrics.compactionsTriggered)-prom_testutil.ToFloat64(db.metrics.compactionsSkipped), 3.0) + // The delay doesn't change the head blocks alignment. + require.Eventually(t, func() bool { + return db.head.MinTime() == db.compactor.(*LeveledCompactor).ranges[0]+1 + }, 500*time.Millisecond, 10*time.Millisecond) + // One compaction was run and one block was produced. + require.Equal(t, 1.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)) + } + + // The second compaction is expected to result in 2 blocks. + // This ensures that the logic for compaction delay doesn't only work for the first compaction, but also takes into account the future compactions. + // This also ensures that no delay happens between consecutive compactions. + db.DisableCompactions() + app = db.Appender(context.Background()) + _, err = app.Append(0, label, 31, 0) + require.NoError(t, err) + _, err = app.Append(0, label, 41, 0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + + if c.compactionDelay == 0 { + // Compaction should still run on the first trigger. + compactAndCheck(db) + } else { + db.EnableCompactions() + waitUntilCompactedAndCheck(db) + } + + // Two other compactions were run. + require.Eventually(t, func() bool { + return prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran) == 3.0 + }, 500*time.Millisecond, 10*time.Millisecond) + + if c.compactionDelay == 0 { + return + } + + // This test covers a special case. If auto compaction is in a delay period and a manual compaction is triggered, + // auto compaction should stop waiting for the delay if the head is no longer compactable. + // Of course, if the head is still compactable after the manual compaction, auto compaction will continue waiting for the same delay. 
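+			// An illustrative timeline (editor's sketch of the intended behaviour, with delay D):
+			//   t0        the head becomes compactable; the next auto trigger arms the delay
+			//   t0..t0+D  further auto triggers are skipped
+			//   t0+x      a manual CompactHead() empties the head
+			//   t0+x+e    the next auto trigger sees a non-compactable head and resets
+			//             timeWhenCompactionDelayStarted to zero, ending the wait early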
+			getTimeWhenCompactionDelayStarted := func() time.Time {
+				t.Helper()
+				db.cmtx.Lock()
+				defer db.cmtx.Unlock()
+				return db.timeWhenCompactionDelayStarted
+			}
+
+			db.DisableCompactions()
+			app = db.Appender(context.Background())
+			_, err = app.Append(0, label, 51, 0)
+			require.NoError(t, err)
+			require.NoError(t, app.Commit())
+
+			require.True(t, db.head.compactable())
+			db.EnableCompactions()
+			// Trigger an auto compaction.
+			db.compactc <- struct{}{}
+			// That makes auto compaction start waiting for the delay.
+			require.Eventually(t, func() bool {
+				return !getTimeWhenCompactionDelayStarted().IsZero()
+			}, 100*time.Millisecond, 10*time.Millisecond)
+			// Trigger a manual compaction.
+			require.NoError(t, db.CompactHead(NewRangeHead(db.Head(), 0, 50.0)))
+			require.Equal(t, 4.0, prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran))
+			// Re-trigger an auto compaction.
+			db.compactc <- struct{}{}
+			// That makes auto compaction stop waiting for the delay.
+			require.Eventually(t, func() bool {
+				return getTimeWhenCompactionDelayStarted().IsZero()
+			}, 100*time.Millisecond, 10*time.Millisecond)
+		})
+	}
+}
+
+// TestDelayedCompactionDoesNotBlockUnrelatedOps makes sure that when delayed compaction is enabled,
+// operations that don't directly derive from the Head compaction are not delayed; here we consider disk block compaction.
+func TestDelayedCompactionDoesNotBlockUnrelatedOps(t *testing.T) {
+	cases := []struct {
+		name            string
+		whenCompactable bool
+	}{
+		{
+			"Head is compactable",
+			true,
+		},
+		{
+			"Head is not compactable",
+			false,
+		},
+	}
+
+	for _, c := range cases {
+		c := c
+		t.Run(c.name, func(t *testing.T) {
+			t.Parallel()
+
+			tmpdir := t.TempDir()
+			// Some blocks that need compaction are present.
+			createBlock(t, tmpdir, genSeries(1, 1, 0, 100))
+			createBlock(t, tmpdir, genSeries(1, 1, 100, 200))
+			createBlock(t, tmpdir, genSeries(1, 1, 200, 300))
+
+			options := DefaultOptions()
+			// This will make the test time out if compaction really waits for it.
+			options.CompactionDelay = time.Hour
+			db, err := open(tmpdir, log.NewNopLogger(), nil, options, []int64{10, 200}, nil)
+			require.NoError(t, err)
+			defer func() {
+				require.NoError(t, db.Close())
+			}()
+
+			db.DisableCompactions()
+			require.Len(t, db.Blocks(), 3)
+
+			if c.whenCompactable {
+				label := labels.FromStrings("foo", "bar")
+				app := db.Appender(context.Background())
+				_, err := app.Append(0, label, 301, 0)
+				require.NoError(t, err)
+				_, err = app.Append(0, label, 317, 0)
+				require.NoError(t, err)
+				require.NoError(t, app.Commit())
+				// The Head is compactable and will still be so at the end.
+				require.True(t, db.head.compactable())
+				defer func() {
+					require.True(t, db.head.compactable())
+				}()
+			}
+
+			// The blocks are compacted.
+			db.Compact(context.Background())
+			require.Len(t, db.Blocks(), 2)
+		})
+	}
+}
diff --git a/tsdb/db.go b/tsdb/db.go
index 090d6fcf0..a5b3a5e60 100644
--- a/tsdb/db.go
+++ b/tsdb/db.go
@@ -21,6 +21,7 @@ import (
 	"io"
 	"io/fs"
 	"math"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"slices"
@@ -48,7 +49,7 @@ import (
 )
 
 const (
-	// Default duration of a block in milliseconds.
+	// DefaultBlockDuration is the default duration of a block in milliseconds.
 	DefaultBlockDuration = int64(2 * time.Hour / time.Millisecond)
 
 	// Block dir suffixes to make deletion and creation operations atomic.
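Editor's aside on the constant above: int64(2 * time.Hour / time.Millisecond) evaluates to 7_200_000, i.e. two hours expressed in milliseconds. With default options the head's chunkRange derives from this value, so the generateCompactionDelay() cap introduced further down (10% of chunkRange) works out to at most 720_000 ms, i.e. 12 minutes.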
@@ -84,6 +85,8 @@ func DefaultOptions() *Options { OutOfOrderCapMax: DefaultOutOfOrderCapMax, EnableOverlappingCompaction: true, EnableSharding: false, + EnableDelayedCompaction: false, + CompactionDelay: time.Duration(0), } } @@ -184,12 +187,18 @@ type Options struct { // The reason why this flag exists is because there are various users of the TSDB // that do not want vertical compaction happening on ingest time. Instead, // they'd rather keep overlapping blocks and let another component do the overlapping compaction later. - // For Prometheus, this will always be true. EnableOverlappingCompaction bool // EnableSharding enables query sharding support in TSDB. EnableSharding bool + // EnableDelayedCompaction, when set to true, assigns a random value to CompactionDelay during DB opening. + // When set to false, delayed compaction is disabled, unless CompactionDelay is set directly. + EnableDelayedCompaction bool + // CompactionDelay delays the start time of auto compactions. + // It can be increased by up to one minute if the DB does not commit too often. + CompactionDelay time.Duration + // NewCompactorFunc is a function that returns a TSDB compactor. NewCompactorFunc NewCompactorFunc @@ -246,6 +255,9 @@ type DB struct { // Cancel a running compaction when a shutdown is initiated. compactCancel context.CancelFunc + // timeWhenCompactionDelayStarted helps delay the compactions start time. + timeWhenCompactionDelayStarted time.Time + // oooWasEnabled is true if out of order support was enabled at least one time // during the time TSDB was up. In which case we need to keep supporting // out-of-order compaction and vertical queries. @@ -681,7 +693,7 @@ func (db *DBReadOnly) LastBlockID() (string, error) { return "", err } - max := uint64(0) + maxT := uint64(0) lastBlockID := "" @@ -693,8 +705,8 @@ func (db *DBReadOnly) LastBlockID() (string, error) { continue // Not a block dir. } timestamp := ulidObj.Time() - if timestamp > max { - max = timestamp + if timestamp > maxT { + maxT = timestamp lastBlockID = dirName } } @@ -998,6 +1010,10 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.oooWasEnabled.Store(true) } + if opts.EnableDelayedCompaction { + opts.CompactionDelay = db.generateCompactionDelay() + } + go db.run(ctx) return db, nil @@ -1186,6 +1202,12 @@ func (a dbAppender) Commit() error { return err } +// waitingForCompactionDelay returns true if the DB is waiting for the Head compaction delay. +// This doesn't guarantee that the Head is really compactable. +func (db *DB) waitingForCompactionDelay() bool { + return time.Since(db.timeWhenCompactionDelayStarted) < db.opts.CompactionDelay +} + // Compact data if possible. After successful compaction blocks are reloaded // which will also delete the blocks that fall out of the retention window. // Old blocks are only deleted on reloadBlocks based on the new block's parent information. @@ -1219,7 +1241,21 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { return nil default: } + if !db.head.compactable() { + // Reset the counter once the head compactions are done. + // This would also reset it if a manual compaction was triggered while the auto compaction was in its delay period. + if !db.timeWhenCompactionDelayStarted.IsZero() { + db.timeWhenCompactionDelayStarted = time.Time{} + } + break + } + + if db.timeWhenCompactionDelayStarted.IsZero() { + // Start counting for the delay. 
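+			// (Editor's note: the first trigger after the head becomes compactable arms
+			// the timer; triggers arriving within CompactionDelay fall through to the
+			// break below.)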
+			db.timeWhenCompactionDelayStarted = time.Now()
+		}
+		if db.waitingForCompactionDelay() {
 			break
 		}
 		mint := db.head.MinTime()
@@ -1295,6 +1331,9 @@ func (db *DB) CompactOOOHead(ctx context.Context) error {
 	return db.compactOOOHead(ctx)
 }
 
+// compactOOOHeadTestingCallback is a callback for testing.
+var compactOOOHeadTestingCallback func()
+
 func (db *DB) compactOOOHead(ctx context.Context) error {
 	if !db.oooWasEnabled.Load() {
 		return nil
@@ -1304,6 +1343,11 @@ func (db *DB) compactOOOHead(ctx context.Context) error {
 		return fmt.Errorf("get ooo compaction head: %w", err)
 	}
 
+	if compactOOOHeadTestingCallback != nil {
+		compactOOOHeadTestingCallback()
+		compactOOOHeadTestingCallback = nil
+	}
+
 	ulids, err := db.compactOOO(db.dir, oooHead)
 	if err != nil {
 		return fmt.Errorf("compact ooo head: %w", err)
@@ -1421,7 +1465,7 @@ func (db *DB) compactBlocks() (err error) {
 		// If we have a lot of blocks to compact the whole process might take
 		// long enough that we end up with a HEAD block that needs to be written.
 		// Check if that's the case and stop compactions early.
-		if db.head.compactable() {
+		if db.head.compactable() && !db.waitingForCompactionDelay() {
 			level.Warn(db.logger).Log("msg", "aborting block compactions to persist the head block")
 			return nil
 		}
@@ -1924,6 +1968,11 @@ func (db *DB) EnableCompactions() {
 	level.Info(db.logger).Log("msg", "Compactions enabled")
 }
 
+func (db *DB) generateCompactionDelay() time.Duration {
+	// Up to 10% of the head's chunkRange.
+	return time.Duration(rand.Int63n(db.head.chunkRange.Load()/10)) * time.Millisecond
+}
+
 // ForceHeadMMap is intended for use only in tests and benchmarks.
 func (db *DB) ForceHeadMMap() {
 	db.head.mmapHeadChunks()
@@ -1980,7 +2029,7 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 		}
 	}
 
-	blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers
+	blockQueriers := make([]storage.Querier, 0, len(blocks)+1) // +1 to allow for a possible head querier.
 
 	defer func() {
 		if err != nil {
@@ -1992,10 +2041,12 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 		}
 	}()
 
-	if maxt >= db.head.MinTime() {
+	overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime())
+	var headQuerier storage.Querier
+	if maxt >= db.head.MinTime() || overlapsOOO {
 		rh := NewRangeHead(db.head, mint, maxt)
 		var err error
-		inOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt)
+		headQuerier, err = db.blockQuerierFunc(rh, mint, maxt)
 		if err != nil {
 			return nil, fmt.Errorf("open block querier for head %s: %w", rh, err)
 		}
@@ -2005,36 +2056,28 @@ func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) {
 		// won't run into a race later since any truncation that comes after will wait on this querier if it overlaps.
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := inOrderHeadQuerier.Close(); err != nil { + if err := headQuerier.Close(); err != nil { return nil, fmt.Errorf("closing head block querier %s: %w", rh, err) } - inOrderHeadQuerier = nil + headQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - inOrderHeadQuerier, err = db.blockQuerierFunc(rh, newMint, maxt) + headQuerier, err = db.blockQuerierFunc(rh, newMint, maxt) if err != nil { return nil, fmt.Errorf("open block querier for head while getting new querier %s: %w", rh, err) } } - - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } } - if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) - var err error - outOfOrderHeadQuerier, err := db.blockQuerierFunc(rh, mint, maxt) - if err != nil { - // If BlockQuerierFunc() failed, make sure to clean up the pending read created by NewOOORangeHead. - rh.isoState.Close() + if overlapsOOO { + // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. + isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) + headQuerier = NewHeadAndOOOQuerier(mint, maxt, db.head, isoState, headQuerier) + } - return nil, fmt.Errorf("open block querier for ooo head %s: %w", rh, err) - } - - blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + if headQuerier != nil { + blockQueriers = append(blockQueriers, headQuerier) } for _, b := range blocks { @@ -2062,7 +2105,7 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } } - blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+1) // +1 to allow for possible head querier. defer func() { if err != nil { @@ -2074,9 +2117,11 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer } }() - if maxt >= db.head.MinTime() { + overlapsOOO := overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) + var headQuerier storage.ChunkQuerier + if maxt >= db.head.MinTime() || overlapsOOO { rh := NewRangeHead(db.head, mint, maxt) - inOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt) + headQuerier, err = db.blockChunkQuerierFunc(rh, mint, maxt) if err != nil { return nil, fmt.Errorf("open querier for head %s: %w", rh, err) } @@ -2086,35 +2131,28 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuer // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := inOrderHeadQuerier.Close(); err != nil { + if err := headQuerier.Close(); err != nil { return nil, fmt.Errorf("closing head querier %s: %w", rh, err) } - inOrderHeadQuerier = nil + headQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - inOrderHeadQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt) + headQuerier, err = db.blockChunkQuerierFunc(rh, newMint, maxt) if err != nil { return nil, fmt.Errorf("open querier for head while getting new querier %s: %w", rh, err) } } - - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } } - if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) - outOfOrderHeadQuerier, err := db.blockChunkQuerierFunc(rh, mint, maxt) - if err != nil { - // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. - rh.isoState.Close() + if overlapsOOO { + // We need to fetch from in-order and out-of-order chunks: wrap the headQuerier. + isoState := db.head.oooIso.TrackReadAfter(db.lastGarbageCollectedMmapRef) + headQuerier = NewHeadAndOOOChunkQuerier(mint, maxt, db.head, isoState, headQuerier) + } - return nil, fmt.Errorf("open block chunk querier for ooo head %s: %w", rh, err) - } - - blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + if headQuerier != nil { + blockQueriers = append(blockQueriers, headQuerier) } for _, b := range blocks { @@ -2278,13 +2316,13 @@ func blockDirs(dir string) ([]string, error) { return dirs, nil } -func exponential(d, min, max time.Duration) time.Duration { +func exponential(d, minD, maxD time.Duration) time.Duration { d *= 2 - if d < min { - d = min + if d < minD { + d = minD } - if d > max { - d = max + if d > maxD { + d = maxD } return d } diff --git a/tsdb/db_test.go b/tsdb/db_test.go index c8dad8699..4e3a077f6 100644 --- a/tsdb/db_test.go +++ b/tsdb/db_test.go @@ -2690,8 +2690,9 @@ func TestDBReadOnly_Querier_NoAlteration(t *testing.T) { require.NoError(t, db.Close()) // Simulate a corrupted chunk: without a header. 
-	_, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
+	chunk, err := os.Create(path.Join(mmappedChunksDir(db.dir), "000001"))
 	require.NoError(t, err)
+	require.NoError(t, chunk.Close())
 
 	spinUpQuerierAndCheck(db.dir, t.TempDir(), 1)
 
@@ -4500,12 +4501,15 @@ func TestMetadataAssertInMemoryData(t *testing.T) {
 func TestOOOCompaction(t *testing.T) {
 	for name, scenario := range sampleTypeScenarios {
 		t.Run(name, func(t *testing.T) {
-			testOOOCompaction(t, scenario)
+			testOOOCompaction(t, scenario, false)
+		})
+		t.Run(name+"+extra", func(t *testing.T) {
+			testOOOCompaction(t, scenario, true)
 		})
 	}
 }
 
-func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
+func testOOOCompaction(t *testing.T, scenario sampleTypeScenario, addExtraSamples bool) {
 	dir := t.TempDir()
 	ctx := context.Background()
 
@@ -4525,8 +4529,8 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	addSample := func(fromMins, toMins int64) {
 		app := db.Appender(context.Background())
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			_, _, err := scenario.appendFunc(app, series1, ts, ts)
 			require.NoError(t, err)
 			_, _, err = scenario.appendFunc(app, series2, ts, 2*ts)
@@ -4536,7 +4540,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	}
 
 	// Add in-order samples.
-	addSample(250, 350)
+	addSample(250, 300)
 
 	// Verify that the in-memory ooo chunk is empty.
 	checkEmptyOOOChunk := func(lbls labels.Labels) {
@@ -4550,18 +4554,20 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	// Add ooo samples that create multiple chunks.
 	// 90 to 300 spans across 3 block ranges: [0, 120), [120, 240), [240, 360)
-	addSample(90, 310)
+	addSample(90, 300)
 	// Adding the same samples to create overlapping chunks.
 	// Since the active chunk won't start at 90 again, all the new
 	// chunks will have different time ranges than the previous chunks.
-	addSample(90, 310)
+	addSample(90, 300)
+
+	var highest int64 = 300
 
 	verifyDBSamples := func() {
 		var series1Samples, series2Samples []chunks.Sample
-		for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, 350}} {
+		for _, r := range [][2]int64{{90, 119}, {120, 239}, {240, highest}} {
 			fromMins, toMins := r[0], r[1]
-			for min := fromMins; min <= toMins; min++ {
-				ts := min * time.Minute.Milliseconds()
+			for m := fromMins; m <= toMins; m++ {
+				ts := m * time.Minute.Milliseconds()
 				series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts))
 				series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts))
 			}
@@ -4586,7 +4592,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 		require.NoError(t, err)
 		require.False(t, created)
 		require.Positive(t, ms.ooo.oooHeadChunk.chunk.NumSamples())
-		require.Len(t, ms.ooo.oooMmappedChunks, 14) // 7 original, 7 duplicate.
+		require.Len(t, ms.ooo.oooMmappedChunks, 13) // 7 original, 6 duplicate.
 	}
 	checkNonEmptyOOOChunk(series1)
 	checkNonEmptyOOOChunk(series2)
@@ -4604,6 +4610,15 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) {
 	require.NoError(t, err)
 	require.Greater(t, f.Size(), int64(100))
 
+	if addExtraSamples {
+		compactOOOHeadTestingCallback = func() {
+			addSample(90, 120)  // Back in time, to generate a new OOO chunk.
+			addSample(300, 330) // Now some samples after the previous highest timestamp.
+			addSample(300, 330) // Repeat to generate an OOO chunk at these timestamps.
+		}
+		highest = 330
+	}
+
 	// OOO compaction happens here.
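+	// (Editor's note: with addExtraSamples set, the callback registered above
+	// injects fresh OOO samples right before this compaction runs, so the OOO
+	// head is deliberately non-empty afterwards; the relaxed assertions below
+	// account for that.)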
require.NoError(t, db.CompactOOOHead(ctx)) @@ -4619,17 +4634,19 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { require.Equal(t, "00000001", files[0].Name()) f, err = files[0].Info() require.NoError(t, err) - require.Equal(t, int64(0), f.Size()) - // OOO stuff should not be present in the Head now. - checkEmptyOOOChunk(series1) - checkEmptyOOOChunk(series2) + if !addExtraSamples { + require.Equal(t, int64(0), f.Size()) + // OOO stuff should not be present in the Head now. + checkEmptyOOOChunk(series1) + checkEmptyOOOChunk(series2) + } verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4648,7 +4665,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { // Checking for expected data in the blocks. verifySamples(db.Blocks()[0], 90, 119) verifySamples(db.Blocks()[1], 120, 239) - verifySamples(db.Blocks()[2], 240, 310) + verifySamples(db.Blocks()[2], 240, 299) // There should be a single m-map file. mmapDir := mmappedChunksDir(db.head.opts.ChunkDirRoot) @@ -4661,7 +4678,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { err = db.CompactHead(NewRangeHead(db.head, 250*time.Minute.Milliseconds(), 350*time.Minute.Milliseconds())) require.NoError(t, err) require.Len(t, db.Blocks(), 4) // [0, 120), [120, 240), [240, 360), [250, 351) - verifySamples(db.Blocks()[3], 250, 350) + verifySamples(db.Blocks()[3], 250, highest) verifyDBSamples() // Blocks created out of normal and OOO head now. But not merged. @@ -4678,7 +4695,7 @@ func testOOOCompaction(t *testing.T, scenario sampleTypeScenario) { require.Len(t, db.Blocks(), 3) // [0, 120), [120, 240), [240, 360) verifySamples(db.Blocks()[0], 90, 119) verifySamples(db.Blocks()[1], 120, 239) - verifySamples(db.Blocks()[2], 240, 350) // Merged block. + verifySamples(db.Blocks()[2], 240, highest) // Merged block. verifyDBSamples() // Final state. Blocks from normal and OOO head are merged. 
} @@ -4713,8 +4730,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4768,8 +4785,8 @@ func testOOOCompactionWithNormalCompaction(t *testing.T, scenario sampleTypeScen verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4822,8 +4839,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4877,8 +4894,8 @@ func testOOOCompactionWithDisabledWriteLog(t *testing.T, scenario sampleTypeScen verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, 2*ts)) } @@ -4931,8 +4948,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) _, _, err = scenario.appendFunc(app, series2, ts, 2*ts) @@ -4979,8 +4996,8 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa verifySamples := func(fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) series2Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) series2Samples = append(series2Samples, scenario.sampleFunc(ts, ts*2)) } @@ -5019,57 +5036,181 @@ func testOOOQueryAfterRestartWithSnapshotAndRemovedWBL(t *testing.T, scenario sa func Test_Querier_OOOQuery(t *testing.T) { opts := DefaultOptions() - opts.OutOfOrderCapMax = 30 opts.OutOfOrderTimeWindow = 24 * time.Hour.Milliseconds() series1 := labels.FromStrings("foo", "bar1") 
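+	// Editor's note: the parity filters used by the batches below (t%2 == 0 vs
+	// t%2 == 1) interleave two streams: even minutes are appended in order first,
+	// while odd minutes are appended afterwards and land in the OOO head.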
+ type filterFunc func(t int64) bool + defaultFilterFunc := func(t int64) bool { return true } + minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) { + addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 - for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { - _, err := app.Append(0, series1, min, float64(min)) - if min >= queryMinT && min <= queryMaxT { - expSamples = append(expSamples, sample{t: min, f: float64(min)}) + for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() { + if !filter(m / time.Minute.Milliseconds()) { + continue + } + _, err := app.Append(0, series1, m, float64(m)) + if m >= queryMinT && m <= queryMaxT { + expSamples = append(expSamples, sample{t: m, f: float64(m)}) } require.NoError(t, err) totalAppended++ } require.NoError(t, app.Commit()) + require.Positive(t, totalAppended, 0) // Sanity check that filter is not too zealous. return expSamples, totalAppended } + type sampleBatch struct { + minT int64 + maxT int64 + filter filterFunc + isOOO bool + } + tests := []struct { - name string - queryMinT int64 - queryMaxT int64 - inOrderMinT int64 - inOrderMaxT int64 - oooMinT int64 - oooMaxT int64 + name string + oooCap int64 + queryMinT int64 + queryMaxT int64 + batches []sampleBatch }{ { - name: "query interval covering ooomint and inordermaxt returns all ingested samples", - queryMinT: minutes(0), - queryMaxT: minutes(200), - inOrderMinT: minutes(100), - inOrderMaxT: minutes(200), - oooMinT: minutes(0), - oooMaxT: minutes(99), + name: "query interval covering ooomint and inordermaxt returns all ingested samples", + oooCap: 30, + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: defaultFilterFunc, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: defaultFilterFunc, + isOOO: true, + }, + }, }, { - name: "partial query interval returns only samples within interval", - queryMinT: minutes(20), - queryMaxT: minutes(180), - inOrderMinT: minutes(100), - inOrderMaxT: minutes(200), - oooMinT: minutes(0), - oooMaxT: minutes(99), + name: "partial query interval returns only samples within interval", + oooCap: 30, + queryMinT: minutes(20), + queryMaxT: minutes(180), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: defaultFilterFunc, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: defaultFilterFunc, + isOOO: true, + }, + }, + }, + { + name: "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval", + oooCap: 30, + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: func(t int64) bool { return t%2 == 0 }, + isOOO: false, + }, + { + minT: minutes(170), + maxT: minutes(180), + filter: func(t int64) bool { return t%2 == 1 }, + isOOO: true, + }, + }, + }, + { + name: "query overlapping inorder and ooo in-memory samples returns all ingested samples at the beginning of the interval", + oooCap: 30, + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: func(t int64) bool { return t%2 == 0 }, + isOOO: false, + }, + { + minT: 
minutes(100),
+					maxT:   minutes(110),
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query inorder contains ooo mmapped samples returns all ingested samples at the beginning of the interval",
+			oooCap:    5,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(101),
+					maxT:   minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+				{
+					minT:   minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, at times that do not overlap with the in-order head chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
+			oooCap:    30,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(101),
+					maxT:   minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+				{
+					minT:   minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, at times that do not overlap with the in-order head chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
 	}
 	for _, tc := range tests {
 		t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
+			opts.OutOfOrderCapMax = tc.oooCap
 			db := openTestDB(t, opts, nil)
 			db.DisableCompactions()
 			defer func() {
@@ -5077,12 +5218,14 @@ func Test_Querier_OOOQuery(t *testing.T) {
 			}()
 
 			var expSamples []chunks.Sample
+			var oooSamples, appendedCount int
 
-			// Add in-order samples.
-			expSamples, _ = addSample(db, tc.inOrderMinT, tc.inOrderMaxT, tc.queryMinT, tc.queryMaxT, expSamples)
-
-			// Add out-of-order samples.
- expSamples, oooSamples := addSample(db, tc.oooMinT, tc.oooMaxT, tc.queryMinT, tc.queryMaxT, expSamples) + for _, batch := range tc.batches { + expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter) + if batch.isOOO { + oooSamples += appendedCount + } + } sort.Slice(expSamples, func(i, j int) bool { return expSamples[i].T() < expSamples[j].T() @@ -5108,52 +5251,177 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) { series1 := labels.FromStrings("foo", "bar1") + type filterFunc func(t int64) bool + defaultFilterFunc := func(t int64) bool { return true } + minutes := func(m int64) int64 { return m * time.Minute.Milliseconds() } - addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample) ([]chunks.Sample, int) { + addSample := func(db *DB, fromMins, toMins, queryMinT, queryMaxT int64, expSamples []chunks.Sample, filter filterFunc) ([]chunks.Sample, int) { app := db.Appender(context.Background()) totalAppended := 0 - for min := fromMins; min <= toMins; min += time.Minute.Milliseconds() { - _, err := app.Append(0, series1, min, float64(min)) - if min >= queryMinT && min <= queryMaxT { - expSamples = append(expSamples, sample{t: min, f: float64(min)}) + for m := fromMins; m <= toMins; m += time.Minute.Milliseconds() { + if !filter(m / time.Minute.Milliseconds()) { + continue + } + _, err := app.Append(0, series1, m, float64(m)) + if m >= queryMinT && m <= queryMaxT { + expSamples = append(expSamples, sample{t: m, f: float64(m)}) } require.NoError(t, err) totalAppended++ } require.NoError(t, app.Commit()) + require.Positive(t, totalAppended) // Sanity check that filter is not too zealous. return expSamples, totalAppended } + type sampleBatch struct { + minT int64 + maxT int64 + filter filterFunc + isOOO bool + } + tests := []struct { - name string - queryMinT int64 - queryMaxT int64 - inOrderMinT int64 - inOrderMaxT int64 - oooMinT int64 - oooMaxT int64 + name string + oooCap int64 + queryMinT int64 + queryMaxT int64 + batches []sampleBatch }{ { - name: "query interval covering ooomint and inordermaxt returns all ingested samples", - queryMinT: minutes(0), - queryMaxT: minutes(200), - inOrderMinT: minutes(100), - inOrderMaxT: minutes(200), - oooMinT: minutes(0), - oooMaxT: minutes(99), + name: "query interval covering ooomint and inordermaxt returns all ingested samples", + oooCap: 30, + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: defaultFilterFunc, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: defaultFilterFunc, + isOOO: true, + }, + }, }, { - name: "partial query interval returns only samples within interval", - queryMinT: minutes(20), - queryMaxT: minutes(180), - inOrderMinT: minutes(100), - inOrderMaxT: minutes(200), - oooMinT: minutes(0), - oooMaxT: minutes(99), + name: "partial query interval returns only samples within interval", + oooCap: 30, + queryMinT: minutes(20), + queryMaxT: minutes(180), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: defaultFilterFunc, + }, + { + minT: minutes(0), + maxT: minutes(99), + filter: defaultFilterFunc, + isOOO: true, + }, + }, + }, + { + name: "query overlapping inorder and ooo samples returns all ingested samples at the end of the interval", + oooCap: 30, + queryMinT: minutes(0), + queryMaxT: minutes(200), + batches: []sampleBatch{ + { + minT: minutes(100), + maxT: minutes(200), + filter: func(t int64) bool { 
return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(170),
+					maxT:   minutes(180),
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query overlapping inorder and ooo in-memory samples returns all ingested samples at the beginning of the interval",
+			oooCap:    30,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(100),
+					maxT:   minutes(110),
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query inorder contains ooo mmapped samples returns all ingested samples at the beginning of the interval",
+			oooCap:    5,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(101),
+					maxT:   minutes(101 + (5-1)*2), // Append samples to fit in a single mmapped OOO chunk and fit inside the first in-order mmapped chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+				{
+					minT:   minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, at times that do not overlap with the in-order head chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
+		{
+			name:      "query overlapping inorder and ooo mmapped samples returns all ingested samples at the beginning of the interval",
+			oooCap:    30,
+			queryMinT: minutes(0),
+			queryMaxT: minutes(200),
+			batches: []sampleBatch{
+				{
+					minT:   minutes(100),
+					maxT:   minutes(200),
+					filter: func(t int64) bool { return t%2 == 0 },
+					isOOO:  false,
+				},
+				{
+					minT:   minutes(101),
+					maxT:   minutes(101 + (30-1)*2), // Append samples to fit in a single mmapped OOO chunk and overlap the first in-order mmapped chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+				{
+					minT:   minutes(191),
+					maxT:   minutes(193), // Append some more OOO samples to trigger mapping the OOO chunk, at times that do not overlap with the in-order head chunk.
+					filter: func(t int64) bool { return t%2 == 1 },
+					isOOO:  true,
+				},
+			},
+		},
 	}
 	for _, tc := range tests {
 		t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) {
+			opts.OutOfOrderCapMax = tc.oooCap
 			db := openTestDB(t, opts, nil)
 			db.DisableCompactions()
 			defer func() {
@@ -5161,12 +5429,14 @@ func Test_ChunkQuerier_OOOQuery(t *testing.T) {
 			}()
 
 			var expSamples []chunks.Sample
+			var oooSamples, appendedCount int
 
-			// Add in-order samples.
-			expSamples, _ = addSample(db, tc.inOrderMinT, tc.inOrderMaxT, tc.queryMinT, tc.queryMaxT, expSamples)
-
-			// Add out-of-order samples.
- expSamples, oooSamples := addSample(db, tc.oooMinT, tc.oooMaxT, tc.queryMinT, tc.queryMaxT, expSamples) + for _, batch := range tc.batches { + expSamples, appendedCount = addSample(db, batch.minT, batch.maxT, tc.queryMinT, tc.queryMaxT, expSamples, batch.filter) + if batch.isOOO { + oooSamples += appendedCount + } + } sort.Slice(expSamples, func(i, j int) bool { return expSamples[i].T() < expSamples[j].T() @@ -5222,9 +5492,9 @@ func testOOOAppendAndQuery(t *testing.T, scenario sampleTypeScenario) { app := db.Appender(context.Background()) key := lbls.String() from, to := minutes(fromMins), minutes(toMins) - for min := from; min <= to; min += time.Minute.Milliseconds() { + for m := from; m <= to; m += time.Minute.Milliseconds() { val := rand.Intn(1000) - _, s, err := scenario.appendFunc(app, lbls, min, int64(val)) + _, s, err := scenario.appendFunc(app, lbls, m, int64(val)) if faceError { require.Error(t, err) } else { @@ -5353,14 +5623,14 @@ func testOOODisabled(t *testing.T, scenario sampleTypeScenario) { app := db.Appender(context.Background()) key := lbls.String() from, to := minutes(fromMins), minutes(toMins) - for min := from; min <= to; min += time.Minute.Milliseconds() { - _, _, err := scenario.appendFunc(app, lbls, min, min) + for m := from; m <= to; m += time.Minute.Milliseconds() { + _, _, err := scenario.appendFunc(app, lbls, m, m) if faceError { require.Error(t, err) failedSamples++ } else { require.NoError(t, err) - expSamples[key] = append(expSamples[key], scenario.sampleFunc(min, min)) + expSamples[key] = append(expSamples[key], scenario.sampleFunc(m, m)) totalSamples++ } } @@ -5427,9 +5697,9 @@ func testWBLAndMmapReplay(t *testing.T, scenario sampleTypeScenario) { app := db.Appender(context.Background()) key := lbls.String() from, to := minutes(fromMins), minutes(toMins) - for min := from; min <= to; min += time.Minute.Milliseconds() { + for m := from; m <= to; m += time.Minute.Milliseconds() { val := rand.Intn(1000) - _, s, err := scenario.appendFunc(app, lbls, min, int64(val)) + _, s, err := scenario.appendFunc(app, lbls, m, int64(val)) require.NoError(t, err) expSamples[key] = append(expSamples[key], s) totalSamples++ @@ -5618,8 +5888,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { addSample := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) } @@ -5706,8 +5976,8 @@ func testOOOCompactionFailure(t *testing.T, scenario sampleTypeScenario) { verifySamples := func(block *Block, fromMins, toMins int64) { series1Samples := make([]chunks.Sample, 0, toMins-fromMins+1) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() series1Samples = append(series1Samples, scenario.sampleFunc(ts, ts)) } expRes := map[string][]chunks.Sample{ @@ -5755,8 +6025,8 @@ func TestWBLCorruption(t *testing.T) { var allSamples, expAfterRestart []chunks.Sample addSamples := func(fromMins, toMins int64, afterRestart bool) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, err := app.Append(0, series1, ts, float64(ts)) require.NoError(t, 
err) allSamples = append(allSamples, sample{t: ts, f: float64(ts)}) @@ -5909,8 +6179,8 @@ func testOOOMmapCorruption(t *testing.T, scenario sampleTypeScenario) { var allSamples, expInMmapChunks []chunks.Sample addSamples := func(fromMins, toMins int64, inMmapAfterCorruption bool) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) @@ -6054,8 +6324,8 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) { series1 := labels.FromStrings("foo", "bar1") addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool, allSamples []chunks.Sample) []chunks.Sample { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) if success { require.NoError(t, err) @@ -6088,7 +6358,7 @@ func testOutOfOrderRuntimeConfig(t *testing.T, scenario sampleTypeScenario) { // WBL is not empty. size, err := db.head.wbl.Size() require.NoError(t, err) - require.Greater(t, size, int64(0)) + require.Positive(t, size) require.Empty(t, db.Blocks()) require.NoError(t, db.compactOOOHead(ctx)) @@ -6265,8 +6535,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) { series1 := labels.FromStrings("foo", "bar1") addSamples := func(t *testing.T, db *DB, fromMins, toMins int64, success bool) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, _, err := scenario.appendFunc(app, series1, ts, ts) if success { require.NoError(t, err) @@ -6279,8 +6549,8 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) { verifySamples := func(t *testing.T, db *DB, fromMins, toMins int64) { var expSamples []chunks.Sample - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() expSamples = append(expSamples, scenario.sampleFunc(ts, ts)) } @@ -6393,8 +6663,8 @@ func testWblReplayAfterOOODisableAndRestart(t *testing.T, scenario sampleTypeSce var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) @@ -6460,8 +6730,8 @@ func testPanicOnApplyConfig(t *testing.T, scenario sampleTypeScenario) { var allSamples []chunks.Sample addSamples := func(fromMins, toMins int64) { app := db.Appender(context.Background()) - for min := fromMins; min <= toMins; min++ { - ts := min * time.Minute.Milliseconds() + for m := fromMins; m <= toMins; m++ { + ts := m * time.Minute.Milliseconds() _, s, err := scenario.appendFunc(app, series1, ts, ts) require.NoError(t, err) allSamples = append(allSamples, s) @@ -6517,8 +6787,8 @@ func testDiskFillingUpAfterDisablingOOO(t *testing.T, scenario sampleTypeScenari var allSamples 
[]chunks.Sample
 	addSamples := func(fromMins, toMins int64) {
 		app := db.Appender(context.Background())
-		for min := fromMins; min <= toMins; min++ {
-			ts := min * time.Minute.Milliseconds()
+		for m := fromMins; m <= toMins; m++ {
+			ts := m * time.Minute.Milliseconds()
 			_, s, err := scenario.appendFunc(app, series1, ts, ts)
 			require.NoError(t, err)
 			allSamples = append(allSamples, s)
@@ -7341,3 +7611,25 @@ func TestBlockQuerierAndBlockChunkQuerier(t *testing.T) {
 	// Make sure only block-1 is queried.
 	require.Equal(t, "block-1", lbls.Get("block"))
 }
+
+func TestGenerateCompactionDelay(t *testing.T) {
+	assertDelay := func(delay time.Duration) {
+		t.Helper()
+		require.GreaterOrEqual(t, delay, time.Duration(0))
+		// No more than 10% of the chunkRange.
+		require.LessOrEqual(t, delay, 6000*time.Millisecond)
+	}
+
+	opts := DefaultOptions()
+	opts.EnableDelayedCompaction = true
+	db := openTestDB(t, opts, []int64{60000})
+	defer func() {
+		require.NoError(t, db.Close())
+	}()
+	// The delay is generated and set while opening the DB.
+	assertDelay(db.opts.CompactionDelay)
+
+	for i := 0; i < 1000; i++ {
+		assertDelay(db.generateCompactionDelay())
+	}
+}
diff --git a/tsdb/encoding/encoding.go b/tsdb/encoding/encoding.go
index cd98fbd82..88fdd30c8 100644
--- a/tsdb/encoding/encoding.go
+++ b/tsdb/encoding/encoding.go
@@ -201,8 +201,8 @@ func (d *Decbuf) UvarintStr() string {
 	return string(d.UvarintBytes())
 }
 
-// The return value becomes invalid if the byte slice goes away.
-// Compared to UvarintStr, this avoid allocations.
+// UvarintBytes returns a slice that becomes invalid if the underlying byte slice goes away.
+// Compared to UvarintStr, it avoids allocations.
 func (d *Decbuf) UvarintBytes() []byte {
 	l := d.Uvarint64()
 	if d.E != nil {
diff --git a/tsdb/head.go b/tsdb/head.go
index 9d81b24ae..b7bfaa0fd 100644
--- a/tsdb/head.go
+++ b/tsdb/head.go
@@ -178,7 +178,6 @@ type HeadOptions struct {
 	WALReplayConcurrency int
 
 	// EnableSharding enables ShardedPostings() support in the Head.
-	// EnableSharding is temporarily disabled during Init().
 	EnableSharding bool
 }
 
@@ -610,7 +609,7 @@ const cardinalityCacheExpirationTime = time.Duration(30) * time.Second
 // Init loads data from the write ahead log and prepares the head for writes.
 // It should be called before using an appender so that it
 // limits the ingested samples to the head min valid time.
-func (h *Head) Init(minValidTime int64) (err error) {
+func (h *Head) Init(minValidTime int64) error {
 	h.minValidTime.Store(minValidTime)
 	defer func() {
 		h.postings.EnsureOrder(h.opts.WALReplayConcurrency)
@@ -624,24 +623,6 @@ func (h *Head) Init(minValidTime int64) (err error) {
 		}
 	}()
 
-	// If sharding is enabled, disable it while initializing, and calculate the shards later.
-	// We're going to use that field for other purposes during WAL replay,
-	// so we don't want to waste time on calculating the shard that we're going to lose anyway.
-	if h.opts.EnableSharding {
-		h.opts.EnableSharding = false
-		defer func() {
-			h.opts.EnableSharding = true
-			if err == nil {
-				// No locking is needed here as nobody should be writing while we're in Init.
- for _, stripe := range h.series.series { - for _, s := range stripe { - s.shardHashOrMemoryMappedMaxTime = labels.StableHash(s.lset) - } - } - } - }() - } - level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any") start := time.Now() @@ -702,6 +683,7 @@ func (h *Head) Init(minValidTime int64) (err error) { mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk lastMmapRef chunks.ChunkDiskMapperRef + err error mmapChunkReplayDuration time.Duration ) @@ -2086,11 +2068,9 @@ type memSeries struct { ref chunks.HeadSeriesRef meta *metadata.Metadata - // Series labels hash to use for sharding purposes. - // The value is always 0 when sharding has not been explicitly enabled in TSDB. - // While the WAL replay the value stored here is the max time of any mmapped chunk, - // and the shard hash is re-calculated after WAL replay is complete. - shardHashOrMemoryMappedMaxTime uint64 + // Series labels hash to use for sharding purposes. The value is always 0 when sharding has not + // been explicitly enabled in TSDB. + shardHash uint64 // Everything after here should only be accessed with the lock held. sync.Mutex @@ -2115,6 +2095,8 @@ type memSeries struct { ooo *memSeriesOOOFields + mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. + nextAt int64 // Timestamp at which to cut the next chunk. histogramChunkHasComputedEndTime bool // True if nextAt has been predicted for the current histograms chunk; false otherwise. pendingCommit bool // Whether there are samples waiting to be committed to this series. @@ -2145,10 +2127,10 @@ type memSeriesOOOFields struct { func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, shardHash uint64, isolationDisabled bool) *memSeries { s := &memSeries{ - lset: lset, - ref: id, - nextAt: math.MinInt64, - shardHashOrMemoryMappedMaxTime: shardHash, + lset: lset, + ref: id, + nextAt: math.MinInt64, + shardHash: shardHash, } if !isolationDisabled { s.txs = newTxRing(0) @@ -2236,12 +2218,6 @@ func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkD return removedInOrder + removedOOO } -// shardHash returns the shard hash of the series, only available after WAL replay. -func (s *memSeries) shardHash() uint64 { return s.shardHashOrMemoryMappedMaxTime } - -// mmMaxTime returns the max time of any mmapped chunk in the series, only available during WAL replay. -func (s *memSeries) mmMaxTime() int64 { return int64(s.shardHashOrMemoryMappedMaxTime) } - // cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after // acquiring lock. func (s *memSeries) cleanupAppendIDsBelow(bound uint64) { diff --git a/tsdb/head_append.go b/tsdb/head_append.go index 8d66d1e81..988ce9397 100644 --- a/tsdb/head_append.go +++ b/tsdb/head_append.go @@ -19,6 +19,7 @@ import ( "fmt" "math" + "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/prometheus/model/exemplar" @@ -466,6 +467,9 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTi // like federation and erroring out at that time would be extremely noisy. // This only checks against the latest in-order sample. // The OOO headchunk has its own method to detect these duplicates. 
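+	// (Editor's note on the addition below: if the series most recently stored a
+	// histogram or float histogram at this timestamp, an incoming float sample is
+	// reported as a duplicate rather than compared against s.lastValue.)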
+ if s.lastHistogramValue != nil || s.lastFloatHistogramValue != nil { + return false, 0, storage.NewDuplicateHistogramToFloatErr(t, v) + } if math.Float64bits(s.lastValue) != math.Float64bits(v) { return false, 0, storage.NewDuplicateFloatErr(t, s.lastValue, v) } @@ -837,7 +841,7 @@ func (a *headAppender) Commit() (err error) { floatsAppended = len(a.samples) histogramsAppended = len(a.histograms) + len(a.floatHistograms) // number of samples out of order but accepted: with ooo enabled and within time window - floatOOOAccepted int + oooFloatsAccepted int // number of samples rejected due to: out of order but OOO support disabled. floatOOORejected int histoOOORejected int @@ -933,7 +937,7 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, a.head.chunkDiskMapper, oooCapMax) + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) if chunkCreated { r, ok := oooMmapMarkers[series.ref] if !ok || r != nil { @@ -966,7 +970,7 @@ func (a *headAppender) Commit() (err error) { if s.T > oooMaxT { oooMaxT = s.T } - floatOOOAccepted++ + oooFloatsAccepted++ } else { // Sample is an exact duplicate of the last sample. // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, @@ -1062,7 +1066,7 @@ func (a *headAppender) Commit() (err error) { a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected)) a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended)) a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOOAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted)) a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) @@ -1080,18 +1084,18 @@ func (a *headAppender) Commit() (err error) { } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } c := s.ooo.oooHeadChunk if c == nil || c.chunk.NumSamples() == int(oooCapMax) { // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks. - c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper) + c, mmapRefs = s.cutNewOOOHeadChunk(t, chunkDiskMapper, logger) chunkCreated = true } - ok := c.chunk.Insert(t, v) + ok := c.chunk.Insert(t, v, h, fh) if ok { if chunkCreated || t < c.minTime { c.minTime = t @@ -1399,12 +1403,12 @@ func (s *memSeries) histogramsAppendPreprocessor(t int64, e chunkenc.Encoding, o // It assumes that the time range is 1/ratioToFull full. // Assuming that the samples will keep arriving at the same rate, it will make the // remaining n chunks within this chunk range (before max) equally sized. 
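// Worked example (illustrative): with start=0, maxT=1000, cur=99 and ratioToFull=4, n = 1000/(100*4) = 2.5 and floor(n) = 2, so the chunk is cut at 0 + 1000/2 = 500, leaving room for one more equally sized chunk before maxT; when n <= 1 the remaining range cannot fit another full chunk and maxT itself is returned.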
-func computeChunkEndTime(start, cur, max int64, ratioToFull float64) int64 { - n := float64(max-start) / (float64(cur-start+1) * ratioToFull) +func computeChunkEndTime(start, cur, maxT int64, ratioToFull float64) int64 { + n := float64(maxT-start) / (float64(cur-start+1) * ratioToFull) if n <= 1 { - return max + return maxT } - return int64(float64(start) + float64(max-start)/math.Floor(n)) + return int64(float64(start) + float64(maxT-start)/math.Floor(n)) } func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange int64) *memChunk { @@ -1441,9 +1445,9 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange } // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. -// The caller must ensure that s.ooo is not nil. -func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { - ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper) +// The caller must ensure that s is locked and s.ooo is not nil. +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { + ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger) s.ooo.oooHeadChunk = &oooHeadChunk{ chunk: NewOOOChunk(), @@ -1454,7 +1458,8 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk return s.ooo.oooHeadChunk, ref } -func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper) []chunks.ChunkDiskMapperRef { +// s must be locked when calling. +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { // OOO is not enabled or there is no head chunk, so nothing to m-map here. return nil @@ -1466,6 +1471,10 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap } chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, 1) for _, memchunk := range chks { + if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) { + level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String()) + break + } chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.ooo.oooHeadChunk.minTime, s.ooo.oooHeadChunk.maxTime, memchunk.chunk, true, handleChunkWriteError) chunkRefs = append(chunkRefs, chunkRef) s.ooo.oooMmappedChunks = append(s.ooo.oooMmappedChunks, &mmappedChunk{ diff --git a/tsdb/head_other.go b/tsdb/head_other.go index eb1b93a3e..fea91530d 100644 --- a/tsdb/head_other.go +++ b/tsdb/head_other.go @@ -26,7 +26,7 @@ func (s *memSeries) labels() labels.Labels { return s.lset } -// No-op when not using dedupelabels. +// RebuildSymbolTable is a no-op when not using dedupelabels. func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { return nil } diff --git a/tsdb/head_read.go b/tsdb/head_read.go index 3a50f316b..d81ffbb6a 100644 --- a/tsdb/head_read.go +++ b/tsdb/head_read.go @@ -170,7 +170,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou } // Check if the series belong to the shard. 
- if s.shardHash()%shardCount != shardIndex { + if s.shardHash%shardCount != shardIndex { continue } @@ -199,13 +199,18 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB defer s.Unlock() *chks = (*chks)[:0] + *chks = appendSeriesChunks(s, h.mint, h.maxt, *chks) + return nil +} + +func appendSeriesChunks(s *memSeries, mint, maxt int64, chks []chunks.Meta) []chunks.Meta { for i, c := range s.mmappedChunks { // Do not expose chunks that are outside of the specified range. - if !c.OverlapsClosedInterval(h.mint, h.maxt) { + if !c.OverlapsClosedInterval(mint, maxt) { continue } - *chks = append(*chks, chunks.Meta{ + chks = append(chks, chunks.Meta{ MinTime: c.minTime, MaxTime: c.maxTime, Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(i))), @@ -223,8 +228,8 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB } else { maxTime = chk.maxTime } - if chk.OverlapsClosedInterval(h.mint, h.maxt) { - *chks = append(*chks, chunks.Meta{ + if chk.OverlapsClosedInterval(mint, maxt) { + chks = append(chks, chunks.Meta{ MinTime: chk.minTime, MaxTime: maxTime, Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.headChunkID(len(s.mmappedChunks)+j))), @@ -233,8 +238,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB j++ } } - - return nil + return chks } // headChunkID returns the HeadChunkID referred to by the given position. @@ -244,12 +248,20 @@ func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { return chunks.HeadChunkID(pos) + s.firstChunkID } +const oooChunkIDMask = 1 << 23 + // oooHeadChunkID returns the HeadChunkID referred to by the given position. +// Only the bottom 24 bits are used. Bit 23 is always 1 for an OOO chunk; for the rest: // * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos] // * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk // The caller must ensure that s.ooo is not nil. func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID { - return chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID + return (chunks.HeadChunkID(pos) + s.ooo.firstOOOChunkID) | oooChunkIDMask +} + +func unpackHeadChunkRef(ref chunks.ChunkRef) (seriesID chunks.HeadSeriesRef, chunkID chunks.HeadChunkID, isOOO bool) { + sid, cid := chunks.HeadChunkRef(ref).Unpack() + return sid, (cid & (oooChunkIDMask - 1)), (cid & oooChunkIDMask) != 0 } // LabelValueFor returns label value for the given label name in the series referred to by ID. @@ -339,17 +351,22 @@ func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chu return chk, nil, err } -// ChunkWithCopy returns the chunk for the reference number. -// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk. -func (h *headChunkReader) ChunkWithCopy(meta chunks.Meta) (chunkenc.Chunk, int64, error) { - return h.chunk(meta, true) +type ChunkReaderWithCopy interface { + ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) +} + +// ChunkOrIterableWithCopy returns the chunk for the reference number. +// If the chunk is the in-memory chunk, then it makes a copy and returns the copied chunk, plus the max time of the chunk. +func (h *headChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { + chk, maxTime, err := h.chunk(meta, true) + return chk, nil, maxTime, err } // chunk returns the chunk for the reference number. 
// If copyLastChunk is true, then it makes a copy of the head chunk if asked for it. // Also returns max time of the chunk. func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, int64, error) { - sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack() + sid, cid, isOOO := unpackHeadChunkRef(meta.Ref) s := h.head.series.getByID(sid) // This means that the series has been garbage collected. @@ -358,9 +375,23 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. } s.Lock() - c, headChunk, isOpen, err := s.chunk(cid, h.head.chunkDiskMapper, &h.head.memChunkPool) + defer s.Unlock() + return h.head.chunkFromSeries(s, cid, isOOO, h.mint, h.maxt, h.isoState, copyLastChunk) +} + +// Dumb thing to defeat chunk pool. +type wrapOOOHeadChunk struct { + chunkenc.Chunk +} + +// Call with s locked. +func (h *Head) chunkFromSeries(s *memSeries, cid chunks.HeadChunkID, isOOO bool, mint, maxt int64, isoState *isolationState, copyLastChunk bool) (chunkenc.Chunk, int64, error) { + if isOOO { + chk, maxTime, err := s.oooChunk(cid, h.chunkDiskMapper, &h.memChunkPool) + return wrapOOOHeadChunk{chk}, maxTime, err + } + c, headChunk, isOpen, err := s.chunk(cid, h.chunkDiskMapper, &h.memChunkPool) if err != nil { - s.Unlock() return nil, 0, err } defer func() { @@ -368,13 +399,12 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. // Set this to nil so that Go GC can collect it after it has been used. c.chunk = nil c.prev = nil - h.head.memChunkPool.Put(c) + h.memChunkPool.Put(c) } }() // This means that the chunk is outside the specified range. - if !c.OverlapsClosedInterval(h.mint, h.maxt) { - s.Unlock() + if !c.OverlapsClosedInterval(mint, maxt) { return nil, 0, storage.ErrNotFound } @@ -386,18 +416,17 @@ func (h *headChunkReader) chunk(meta chunks.Meta, copyLastChunk bool) (chunkenc. newB := make([]byte, len(b)) copy(newB, b) // TODO(codesome): Use bytes.Clone() when we upgrade to Go 1.20. // TODO(codesome): Put back in the pool (non-trivial). - chk, err = h.head.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB) + chk, err = h.opts.ChunkPool.Get(s.headChunks.chunk.Encoding(), newB) if err != nil { return nil, 0, err } } - s.Unlock() return &safeHeadChunk{ Chunk: chk, s: s, cid: cid, - isoState: h.isoState, + isoState: isoState, }, maxTime, nil } @@ -410,7 +439,7 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi // incremented by 1 when new chunk is created, hence (id - firstChunkID) gives the slice index. // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix // is >= len(s.mmappedChunks), it represents one of the chunks on s.headChunks linked list. - // The order of elemens is different for slice and linked list. + // The order of elements is different for slice and linked list. // For s.mmappedChunks slice newer chunks are appended to it. // For s.headChunks list newer chunks are prepended to it. // @@ -461,82 +490,19 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi return elem, true, offset == 0, nil } -// oooMergedChunks return an iterable over one or more OOO chunks for the given -// chunks.Meta reference from memory or by m-mapping it from the disk. The -// returned iterable will be a merge of all the overlapping chunks, if any, -// amongst all the chunks in the OOOHead. -// This function is not thread safe unless the caller holds a lock. -// The caller must ensure that s.ooo is not nil. 
-func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) { - _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() +// oooChunk returns the chunk for the HeadChunkID by m-mapping it from the disk. +// It never returns the head OOO chunk. +func (s *memSeries) oooChunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDiskMapper, memChunkPool *sync.Pool) (chunk chunkenc.Chunk, maxTime int64, err error) { + // ix represents the index of chunk in the s.ooo.oooMmappedChunks slice. The chunk id's are + // incremented by 1 when new chunk is created, hence (id - firstOOOChunkID) gives the slice index. + ix := int(id) - int(s.ooo.firstOOOChunkID) - // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are - // incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index. - // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix - // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. - ix := int(cid) - int(s.ooo.firstOOOChunkID) - if ix < 0 || ix > len(s.ooo.oooMmappedChunks) { - return nil, storage.ErrNotFound + if ix < 0 || ix >= len(s.ooo.oooMmappedChunks) { + return nil, 0, storage.ErrNotFound } - if ix == len(s.ooo.oooMmappedChunks) { - if s.ooo.oooHeadChunk == nil { - return nil, errors.New("invalid ooo head chunk") - } - } - - // We create a temporary slice of chunk metas to hold the information of all - // possible chunks that may overlap with the requested chunk. - tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.ooo.oooMmappedChunks)+1) - - for i, c := range s.ooo.oooMmappedChunks { - if c.OverlapsClosedInterval(mint, maxt) { - tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ - meta: chunks.Meta{ - MinTime: c.minTime, - MaxTime: c.maxTime, - Ref: chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))), - }, - ref: c.ref, - }) - } - } - // Add in data copied from the head OOO chunk. - if meta.Chunk != nil { - tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{meta: meta}) - } - - // Next we want to sort all the collected chunks by min time so we can find - // those that overlap and stop when we know the rest don't. - slices.SortFunc(tmpChks, refLessByMinTimeAndMinRef) - - mc := &mergedOOOChunks{} - absoluteMax := int64(math.MinInt64) - for _, c := range tmpChks { - if c.meta.Ref != meta.Ref && (len(mc.chunkIterables) == 0 || c.meta.MinTime > absoluteMax) { - continue - } - var iterable chunkenc.Iterable - if c.meta.Chunk != nil { - iterable = c.meta.Chunk - } else { - chk, err := cdm.Chunk(c.ref) - if err != nil { - var cerr *chunks.CorruptionErr - if errors.As(err, &cerr) { - return nil, fmt.Errorf("invalid ooo mmapped chunk: %w", err) - } - return nil, err - } - iterable = chk - } - mc.chunkIterables = append(mc.chunkIterables, iterable) - if c.meta.MaxTime > absoluteMax { - absoluteMax = c.meta.MaxTime - } - } - - return mc, nil + chk, err := chunkDiskMapper.Chunk(s.ooo.oooMmappedChunks[ix].ref) + return chk, s.ooo.oooMmappedChunks[ix].maxTime, err } // safeHeadChunk makes sure that the chunk can be accessed without a race condition. 
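For orientation, a minimal standalone sketch of the reference layout that oooChunkIDMask and unpackHeadChunkRef above rely on: a head chunk ref packs a series ref above a 24-bit chunk ID field, and bit 23 of the chunk ID flags an out-of-order chunk. The pack/unpack helpers below are illustrative stand-ins written with plain uint64 arithmetic, not the actual tsdb/chunks API; the 40-bit series width is an assumption taken from the HeadChunkRef packing convention.

    package main

    import "fmt"

    // Bit 23 of the 24-bit chunk ID marks an out-of-order chunk,
    // leaving 23 bits for the chunk ID itself.
    const oooChunkIDMask = 1 << 23

    // pack mimics building a head chunk ref: the series ref occupies the
    // bits above the 24-bit chunk ID field. (Illustrative, assumed layout.)
    func pack(seriesRef, chunkID uint64, isOOO bool) uint64 {
        if isOOO {
            chunkID |= oooChunkIDMask
        }
        return seriesRef<<24 | chunkID
    }

    // unpack mirrors unpackHeadChunkRef: split off the series ref, then
    // strip the OOO flag from the chunk ID.
    func unpack(ref uint64) (seriesRef, chunkID uint64, isOOO bool) {
        cid := ref & ((1 << 24) - 1)
        return ref >> 24, cid & (oooChunkIDMask - 1), cid&oooChunkIDMask != 0
    }

    func main() {
        fmt.Println(unpack(pack(42, 5, true))) // 42 5 true
    }

Read this way, the test change further down (Ref: 0x1000000 becoming 0x1800000) is the same not-found probe for series 1, chunk 0, but with the OOO flag bit set.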
diff --git a/tsdb/head_test.go b/tsdb/head_test.go index 09927c23c..18ec4f0ac 100644 --- a/tsdb/head_test.go +++ b/tsdb/head_test.go @@ -23,7 +23,6 @@ import ( "path" "path/filepath" "reflect" - "runtime/pprof" "sort" "strconv" "strings" @@ -34,7 +33,6 @@ import ( "github.com/google/go-cmp/cmp" "github.com/prometheus/client_golang/prometheus" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/common/model" "github.com/stretchr/testify/require" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -90,43 +88,6 @@ func newTestHeadWithOptions(t testing.TB, compressWAL wlog.CompressionType, opts return h, wal } -// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set. -// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located. -// Optionally, BENCHMARK_LOAD_REAL_WLS_PROFILE can be set to a file path to write a CPU profile. -func BenchmarkLoadRealWLs(b *testing.B) { - dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR") - if dir == "" { - b.Skipped() - } - - profileFile := os.Getenv("BENCHMARK_LOAD_REAL_WLS_PROFILE") - if profileFile != "" { - b.Logf("Will profile in %s", profileFile) - f, err := os.Create(profileFile) - require.NoError(b, err) - b.Cleanup(func() { f.Close() }) - require.NoError(b, pprof.StartCPUProfile(f)) - b.Cleanup(pprof.StopCPUProfile) - } - - wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone) - require.NoError(b, err) - b.Cleanup(func() { wal.Close() }) - - wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone) - require.NoError(b, err) - b.Cleanup(func() { wbl.Close() }) - - // Load the WAL. - for i := 0; i < b.N; i++ { - opts := DefaultHeadOptions() - opts.ChunkDirRoot = dir - h, err := NewHead(nil, nil, wal, wbl, opts, nil) - require.NoError(b, err) - h.Init(0) - } -} - func BenchmarkCreateSeries(b *testing.B) { series := genSeries(b.N, 10, 0, 0) h, _ := newTestHead(b, 10000, wlog.CompressionNone, false) @@ -476,6 +437,32 @@ func BenchmarkLoadWLs(b *testing.B) { } } +// BenchmarkLoadRealWLs will be skipped unless the BENCHMARK_LOAD_REAL_WLS_DIR environment variable is set. +// BENCHMARK_LOAD_REAL_WLS_DIR should be the folder where `wal` and `chunks_head` are located. +func BenchmarkLoadRealWLs(b *testing.B) { + dir := os.Getenv("BENCHMARK_LOAD_REAL_WLS_DIR") + if dir == "" { + b.SkipNow() + } + + wal, err := wlog.New(nil, nil, filepath.Join(dir, "wal"), wlog.CompressionNone) + require.NoError(b, err) + b.Cleanup(func() { wal.Close() }) + + wbl, err := wlog.New(nil, nil, filepath.Join(dir, "wbl"), wlog.CompressionNone) + require.NoError(b, err) + b.Cleanup(func() { wbl.Close() }) + + // Load the WAL. + for i := 0; i < b.N; i++ { + opts := DefaultHeadOptions() + opts.ChunkDirRoot = dir + h, err := NewHead(nil, nil, wal, wbl, opts, nil) + require.NoError(b, err) + require.NoError(b, h.Init(0)) + } +} + // TestHead_HighConcurrencyReadAndWrite generates 1000 series with a step of 15s and fills a whole block with samples, // this means in total it generates 4000 chunks because with a step of 15s there are 4 chunks per block per series. 
// While appending the samples to the head it concurrently queries them from multiple go routines and verifies that the @@ -2757,7 +2744,7 @@ func testOutOfOrderSamplesMetric(t *testing.T, scenario sampleTypeScenario) { require.Equal(t, int64(math.MinInt64), db.head.minValidTime.Load()) require.NoError(t, db.Compact(ctx)) - require.Greater(t, db.head.minValidTime.Load(), int64(0)) + require.Positive(t, db.head.minValidTime.Load()) app = db.Appender(ctx) _, err = appendSample(app, db.head.minValidTime.Load()-2) @@ -3677,7 +3664,7 @@ func TestHistogramInWALAndMmapChunk(t *testing.T) { require.Len(t, ms.mmappedChunks, 25) expMmapChunks := make([]*mmappedChunk, 0, 20) for _, mmap := range ms.mmappedChunks { - require.Greater(t, mmap.numSamples, uint16(0)) + require.Positive(t, mmap.numSamples) cpy := *mmap expMmapChunks = append(expMmapChunks, &cpy) } @@ -5695,7 +5682,7 @@ func TestCuttingNewHeadChunks(t *testing.T) { } } -// TestHeadDetectsDuplcateSampleAtSizeLimit tests a regression where a duplicate sample +// TestHeadDetectsDuplicateSampleAtSizeLimit tests a regression where a duplicate sample // is appended to the head, right when the head chunk is at the size limit. // The test adds all samples as duplicate, thus expecting that the result has // exactly half of the samples. @@ -5919,6 +5906,35 @@ func TestPostingsCardinalityStats(t *testing.T) { require.Equal(t, statsForSomeLabel1, head.PostingsCardinalityStats("n", 1)) } +func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing.T) { + head, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + t.Cleanup(func() { head.Close() }) + + ls := labels.FromStrings(labels.MetricName, "test") + + { + // Append a float 10.0 @ 1_000 + app := head.Appender(context.Background()) + _, err := app.Append(0, ls, 1_000, 10.0) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + { + // Append a histogram @ 2_000 + app := head.Appender(context.Background()) + h := tsdbutil.GenerateTestHistogram(1) + _, err := app.AppendHistogram(0, ls, 2_000, h, nil) + require.NoError(t, err) + require.NoError(t, app.Commit()) + } + + app := head.Appender(context.Background()) + _, err := app.Append(0, ls, 2_000, 10.0) + require.Error(t, err) + require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0)) +} + func TestHeadAppender_AppendCTZeroSample(t *testing.T) { type appendableSamples struct { ts int64 @@ -5928,16 +5944,16 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { for _, tc := range []struct { name string appendableSamples []appendableSamples - expectedSamples []model.Sample + expectedSamples []chunks.Sample }{ { name: "In order ct+normal sample", appendableSamples: []appendableSamples{ {ts: 100, val: 10, ct: 1}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, }, }, { @@ -5946,10 +5962,10 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { {ts: 100, val: 10, ct: 1}, {ts: 101, val: 10, ct: 1}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, - {Timestamp: 101, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, }, }, { @@ -5958,11 +5974,11 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { {ts: 100, val: 10, ct: 1}, {ts: 102, val: 10, ct: 101}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, -
{Timestamp: 100, Value: 10}, - {Timestamp: 101, Value: 0}, - {Timestamp: 102, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 0}, + sample{t: 102, f: 10}, }, }, { @@ -5971,41 +5987,33 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) { {ts: 100, val: 10, ct: 1}, {ts: 101, val: 10, ct: 100}, }, - expectedSamples: []model.Sample{ - {Timestamp: 1, Value: 0}, - {Timestamp: 100, Value: 10}, - {Timestamp: 101, Value: 10}, + expectedSamples: []chunks.Sample{ + sample{t: 1, f: 0}, + sample{t: 100, f: 10}, + sample{t: 101, f: 10}, }, }, } { - h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) - defer func() { - require.NoError(t, h.Close()) - }() - a := h.Appender(context.Background()) - lbls := labels.FromStrings("foo", "bar") - for _, sample := range tc.appendableSamples { - _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) - require.NoError(t, err) - _, err = a.Append(0, lbls, sample.ts, sample.val) - require.NoError(t, err) - } - require.NoError(t, a.Commit()) + t.Run(tc.name, func(t *testing.T) { + h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) + defer func() { + require.NoError(t, h.Close()) + }() + a := h.Appender(context.Background()) + lbls := labels.FromStrings("foo", "bar") + for _, sample := range tc.appendableSamples { + _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) + require.NoError(t, err) + _, err = a.Append(0, lbls, sample.ts, sample.val) + require.NoError(t, err) + } + require.NoError(t, a.Commit()) - q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) - require.NoError(t, err) - ss := q.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) - require.True(t, ss.Next()) - s := ss.At() - require.False(t, ss.Next()) - it := s.Iterator(nil) - for _, sample := range tc.expectedSamples { - require.Equal(t, chunkenc.ValFloat, it.Next()) - timestamp, value := it.At() - require.Equal(t, sample.Timestamp, model.Time(timestamp)) - require.Equal(t, sample.Value, model.SampleValue(value)) - } - require.Equal(t, chunkenc.ValNone, it.Next()) + q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) + require.NoError(t, err) + result := query(t, q, labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")) + require.Equal(t, tc.expectedSamples, result[`{foo="bar"}`]) + }) } } diff --git a/tsdb/head_wal.go b/tsdb/head_wal.go index 2852709a0..ef96b5330 100644 --- a/tsdb/head_wal.go +++ b/tsdb/head_wal.go @@ -435,8 +435,6 @@ Outer: return nil } -func minInt64() int64 { return math.MinInt64 } - // resetSeriesWithMMappedChunks is only used during the WAL replay. func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk, walSeriesRef chunks.HeadSeriesRef) (overlapped bool) { if mSeries.ref != walSeriesRef { @@ -483,11 +481,10 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m } // Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject. if len(mmc) == 0 { - mSeries.shardHashOrMemoryMappedMaxTime = uint64(minInt64()) + mSeries.mmMaxTime = math.MinInt64 } else { - mmMaxTime := mmc[len(mmc)-1].maxTime - mSeries.shardHashOrMemoryMappedMaxTime = uint64(mmMaxTime) - h.updateMinMaxTime(mmc[0].minTime, mmMaxTime) + mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime + h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime) } if len(oooMmc) != 0 { // Mint and maxt can be in any chunk, they are not sorted. 
@@ -588,7 +585,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp unknownRefs++ continue } - if s.T <= ms.mmMaxTime() { + if s.T <= ms.mmMaxTime { continue } if _, chunkCreated := ms.append(s.T, s.V, 0, appendChunkOpts); chunkCreated { @@ -617,7 +614,7 @@ func (wp *walSubsetProcessor) processWALSamples(h *Head, mmappedChunks, oooMmapp unknownHistogramRefs++ continue } - if s.t <= ms.mmMaxTime() { + if s.t <= ms.mmMaxTime { continue } var chunkCreated bool @@ -890,7 +887,7 @@ func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { unknownRefs++ continue } - ok, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper, oooCapMax) + ok, chunkCreated, _ := ms.insert(s.T, s.V, nil, nil, h.chunkDiskMapper, oooCapMax, h.logger) if chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() diff --git a/tsdb/index/index.go b/tsdb/index/index.go index 362105459..0e0e35371 100644 --- a/tsdb/index/index.go +++ b/tsdb/index/index.go @@ -196,8 +196,9 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { return toc, d.Err() } -// NewWriter returns a new Writer to the given filename. It serializes data in format version 2. -// It uses the given encoder to encode each postings list. +// NewWriterWithEncoder returns a new Writer to the given filename. It +// serializes data in format version 2. It uses the given encoder to encode each +// postings list. func NewWriterWithEncoder(ctx context.Context, fn string, encoder PostingsEncoder) (*Writer, error) { dir := filepath.Dir(fn) diff --git a/tsdb/index/postingsstats_test.go b/tsdb/index/postingsstats_test.go index 674e1c052..82f506bc8 100644 --- a/tsdb/index/postingsstats_test.go +++ b/tsdb/index/postingsstats_test.go @@ -20,10 +20,10 @@ import ( func TestPostingsStats(t *testing.T) { stats := &maxHeap{} - max := 3000000 - heapLength := 10 + const maxCount = 3000000 + const heapLength = 10 stats.init(heapLength) - for i := 0; i < max; i++ { + for i := 0; i < maxCount; i++ { item := Stat{ Name: "Label-da", Count: uint64(i), @@ -35,13 +35,13 @@ func TestPostingsStats(t *testing.T) { data := stats.get() require.Len(t, data, 10) for i := 0; i < heapLength; i++ { - require.Equal(t, uint64(max-i), data[i].Count) + require.Equal(t, uint64(maxCount-i), data[i].Count) } } func TestPostingsStats2(t *testing.T) { stats := &maxHeap{} - heapLength := 10 + const heapLength = 10 stats.init(heapLength) stats.push(Stat{Name: "Stuff", Count: 10}) @@ -57,12 +57,12 @@ func TestPostingsStats2(t *testing.T) { func BenchmarkPostingStatsMaxHep(b *testing.B) { stats := &maxHeap{} - max := 9000000 - heapLength := 10 + const maxCount = 9000000 + const heapLength = 10 b.ResetTimer() for n := 0; n < b.N; n++ { stats.init(heapLength) - for i := 0; i < max; i++ { + for i := 0; i < maxCount; i++ { item := Stat{ Name: "Label-da", Count: uint64(i), diff --git a/tsdb/ooo_head.go b/tsdb/ooo_head.go index b2556d62e..0ed9f3648 100644 --- a/tsdb/ooo_head.go +++ b/tsdb/ooo_head.go @@ -14,15 +14,10 @@ package tsdb import ( - "fmt" "sort" + "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/tsdb/chunkenc" - - "github.com/oklog/ulid" - - "github.com/prometheus/prometheus/tsdb/chunks" - "github.com/prometheus/prometheus/tsdb/tombstones" ) // OOOChunk maintains samples in time-ascending order. @@ -39,13 +34,13 @@ func NewOOOChunk() *OOOChunk { // Insert inserts the sample such that order is maintained. // Returns false if insert was not possible due to the same timestamp already existing. 
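// For example (illustrative): inserting t=25 into timestamps [10, 20, 30] misses the append-at-end fast path (25 < 30), binary-searches the insert position, and yields [10, 20, 25, 30]; re-inserting t=20 finds an existing sample at that timestamp and returns false.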
-func (o *OOOChunk) Insert(t int64, v float64) bool { +func (o *OOOChunk) Insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram) bool { // Although out-of-order samples can be out-of-order amongst themselves, we // are opinionated and expect them to be usually in-order meaning we could // try to append at the end first if the new timestamp is higher than the // last known timestamp. if len(o.samples) == 0 || t > o.samples[len(o.samples)-1].t { - o.samples = append(o.samples, sample{t, v, nil, nil}) + o.samples = append(o.samples, sample{t, v, h, fh}) return true } @@ -54,7 +49,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool { if i >= len(o.samples) { // none found. append it at the end - o.samples = append(o.samples, sample{t, v, nil, nil}) + o.samples = append(o.samples, sample{t, v, h, fh}) return true } @@ -66,7 +61,7 @@ func (o *OOOChunk) Insert(t int64, v float64) bool { // Expand length by 1 to make room. use a zero sample, we will overwrite it anyway. o.samples = append(o.samples, sample{}) copy(o.samples[i+1:], o.samples[i:]) - o.samples[i] = sample{t, v, nil, nil} + o.samples[i] = sample{t, v, h, fh} return true } @@ -142,9 +137,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error if newChunk != nil { // A new chunk was allocated. if !recoded { chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + cmint = s.t } chunk = newChunk - cmint = s.t } case chunkenc.EncFloatHistogram: // Ignoring ok is ok, since we don't want to compare to the wrong previous appender anyway. @@ -157,9 +152,9 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error if newChunk != nil { // A new chunk was allocated. if !recoded { chks = append(chks, memChunk{chunk, cmint, cmaxt, nil}) + cmint = s.t } chunk = newChunk - cmint = s.t } } cmaxt = s.t @@ -170,75 +165,3 @@ func (o *OOOChunk) ToEncodedChunks(mint, maxt int64) (chks []memChunk, err error } return chks, nil } - -var _ BlockReader = &OOORangeHead{} - -// OOORangeHead allows querying Head out of order samples via BlockReader -// interface implementation. -type OOORangeHead struct { - head *Head - // mint and maxt are tracked because when a query is handled we only want - // the timerange of the query and having preexisting pointers to the first - // and last timestamp help with that. - mint, maxt int64 - - isoState *oooIsolationState -} - -func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead { - isoState := head.oooIso.TrackReadAfter(minRef) - - return &OOORangeHead{ - head: head, - mint: mint, - maxt: maxt, - isoState: isoState, - } -} - -func (oh *OOORangeHead) Index() (IndexReader, error) { - return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil -} - -func (oh *OOORangeHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil -} - -func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { - // As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing - // Tombstones are not supported for out of order metrics. 
- return tombstones.NewMemTombstones(), nil -} - -var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD") - -func (oh *OOORangeHead) Meta() BlockMeta { - return BlockMeta{ - MinTime: oh.mint, - MaxTime: oh.maxt, - ULID: oooRangeHeadULID, - Stats: BlockStats{ - NumSeries: oh.head.NumSeries(), - }, - } -} - -// Size returns the size taken by the Head block. -func (oh *OOORangeHead) Size() int64 { - return oh.head.Size() -} - -// String returns an human readable representation of the out of order range -// head. It's important to keep this function in order to avoid the struct dump -// when the head is stringified in errors or logs. -func (oh *OOORangeHead) String() string { - return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime()) -} - -func (oh *OOORangeHead) MinTime() int64 { - return oh.mint -} - -func (oh *OOORangeHead) MaxTime() int64 { - return oh.maxt -} diff --git a/tsdb/ooo_head_read.go b/tsdb/ooo_head_read.go index a35276af5..7b58ec566 100644 --- a/tsdb/ooo_head_read.go +++ b/tsdb/ooo_head_read.go @@ -16,6 +16,7 @@ package tsdb import ( "context" "errors" + "fmt" "math" "slices" @@ -27,17 +28,12 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/tombstones" + "github.com/prometheus/prometheus/util/annotations" ) -var _ IndexReader = &OOOHeadIndexReader{} +var _ IndexReader = &HeadAndOOOIndexReader{} -// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be -// accessed. -// It also has a reference to headIndexReader so we can leverage on its -// IndexReader implementation for all the methods that remain the same. We -// decided to do this to avoid code duplication. -// The only methods that change are the ones about getting Series and Postings. -type OOOHeadIndexReader struct { +type HeadAndOOOIndexReader struct { *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef } @@ -53,25 +49,16 @@ func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } -func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader { +func NewHeadAndOOOIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOIndexReader { hr := &headIndexReader{ head: head, mint: mint, maxt: maxt, } - return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef} + return &HeadAndOOOIndexReader{hr, lastGarbageCollectedMmapRef} } -func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0) -} - -// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so -// any chunk at or before this ref will not be considered. 0 disables this check. -// -// maxMmapRef tells upto what max m-map chunk that we can consider. If it is non-0, then -// the oooHeadChunk will not be considered. 
-func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error { +func (oh *HeadAndOOOIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { @@ -88,10 +75,19 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra defer s.Unlock() *chks = (*chks)[:0] - if s.ooo == nil { - return nil + if s.ooo != nil { + return getOOOSeriesChunks(s, oh.mint, oh.maxt, oh.lastGarbageCollectedMmapRef, 0, true, chks) } + *chks = appendSeriesChunks(s, oh.mint, oh.maxt, *chks) + return nil +} +// lastGarbageCollectedMmapRef gives the last mmap chunk that may be being garbage collected and so +// any chunk at or before this ref will not be considered. 0 disables this check. +// +// maxMmapRef tells up to which m-mapped chunk we may consider. If it is non-0, then +// the oooHeadChunk will not be considered. +func getOOOSeriesChunks(s *memSeries, mint, maxt int64, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef, includeInOrder bool, chks *[]chunks.Meta) error { tmpChks := make([]chunks.Meta, 0, len(s.ooo.oooMmappedChunks)) addChunk := func(minT, maxT int64, ref chunks.ChunkRef, chunk chunkenc.Chunk) { @@ -106,7 +102,7 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // Collect all chunks that overlap the query range. if s.ooo.oooHeadChunk != nil { c := s.ooo.oooHeadChunk - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 { + if c.OverlapsClosedInterval(mint, maxt) && maxMmapRef == 0 { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks)))) if len(c.chunk.samples) > 0 { // Empty samples happens in tests, at least. chks, err := s.ooo.oooHeadChunk.chunk.ToEncodedChunks(c.minTime, c.maxTime) @@ -125,12 +121,16 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra } for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- { c := s.ooo.oooMmappedChunks[i] - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) { + if c.OverlapsClosedInterval(mint, maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) addChunk(c.minTime, c.maxTime, ref, nil) } } + if includeInOrder { + tmpChks = appendSeriesChunks(s, mint, maxt, tmpChks) + } + // There is nothing to do if we did not collect any chunk. if len(tmpChks) == 0 { return nil @@ -140,38 +140,43 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // those that overlap. slices.SortFunc(tmpChks, lessByMinTimeAndMinRef) - // Next we want to iterate the sorted collected chunks and only return the - // chunks Meta the first chunk that overlaps with others.
// Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) - // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to - // return chunk Metas for chunk 5 and chunk 6e - *chks = append(*chks, tmpChks[0]) - maxTime := tmpChks[0].MaxTime // Tracks the maxTime of the previous "to be merged chunk". + // In the example 5 overlaps with 7 and 6 overlaps with 8 so we will return + // [5,7], [6,8]. + toBeMerged := tmpChks[0] for _, c := range tmpChks[1:] { - switch { - case c.MinTime > maxTime: - *chks = append(*chks, c) - maxTime = c.MaxTime - case c.MaxTime > maxTime: - maxTime = c.MaxTime - (*chks)[len(*chks)-1].MaxTime = c.MaxTime - fallthrough - default: - // If the head OOO chunk is part of an output chunk, copy the chunk pointer. - if c.Chunk != nil { - (*chks)[len(*chks)-1].Chunk = c.Chunk + if c.MinTime > toBeMerged.MaxTime { + // This chunk doesn't overlap. Send current toBeMerged to output and start a new one. + *chks = append(*chks, toBeMerged) + toBeMerged = c + } else { + // Merge this chunk with existing toBeMerged. + if mm, ok := toBeMerged.Chunk.(*multiMeta); ok { + mm.metas = append(mm.metas, c) + } else { + toBeMerged.Chunk = &multiMeta{metas: []chunks.Meta{toBeMerged, c}} + } + if toBeMerged.MaxTime < c.MaxTime { + toBeMerged.MaxTime = c.MaxTime } } } + *chks = append(*chks, toBeMerged) return nil } -// LabelValues needs to be overridden from the headIndexReader implementation due -// to the check that happens at the beginning where we make sure that the query -// interval overlaps with the head minooot and maxooot. -func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { - if oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxOOOTime() { +// Fake Chunk object to pass a set of Metas inside Meta.Chunk. +type multiMeta struct { + chunkenc.Chunk // We don't expect any of the methods to be called. + metas []chunks.Meta +} + +// LabelValues needs to be overridden from the headIndexReader implementation +// so we can return labels within either in-order range or ooo range. +func (oh *HeadAndOOOIndexReader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { + if oh.maxt < oh.head.MinTime() && oh.maxt < oh.head.MinOOOTime() || oh.mint > oh.head.MaxTime() && oh.mint > oh.head.MaxOOOTime() { return []string{}, nil } @@ -182,29 +187,6 @@ func (oh *OOOHeadIndexReader) LabelValues(ctx context.Context, name string, matc return labelValuesWithMatchers(ctx, oh, name, matchers...) } -type chunkMetaAndChunkDiskMapperRef struct { - meta chunks.Meta - ref chunks.ChunkDiskMapperRef -} - -func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int { - switch { - case a.meta.MinTime < b.meta.MinTime: - return -1 - case a.meta.MinTime > b.meta.MinTime: - return 1 - } - - switch { - case a.meta.Ref < b.meta.Ref: - return -1 - case a.meta.Ref > b.meta.Ref: - return 1 - default: - return 0 - } -} - func lessByMinTimeAndMinRef(a, b chunks.Meta) int { switch { case a.MinTime < b.MinTime: @@ -223,75 +205,89 @@ func lessByMinTimeAndMinRef(a, b chunks.Meta) int { } } -func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values ...string) (index.Postings, error) { - switch len(values) { - case 0: - return index.EmptyPostings(), nil - case 1: - return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings - default: - // TODO(ganesh) We want to only return postings for out of order series. 
- res := make([]index.Postings, 0, len(values)) - for _, value := range values { - res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings - } - return index.Merge(ctx, res...), nil +type HeadAndOOOChunkReader struct { + head *Head + mint, maxt int64 + cr *headChunkReader // If nil, only read OOO chunks. + maxMmapRef chunks.ChunkDiskMapperRef + oooIsoState *oooIsolationState +} + +func NewHeadAndOOOChunkReader(head *Head, mint, maxt int64, cr *headChunkReader, oooIsoState *oooIsolationState, maxMmapRef chunks.ChunkDiskMapperRef) *HeadAndOOOChunkReader { + return &HeadAndOOOChunkReader{ + head: head, + mint: mint, + maxt: maxt, + cr: cr, + maxMmapRef: maxMmapRef, + oooIsoState: oooIsoState, } } -type OOOHeadChunkReader struct { - head *Head - mint, maxt int64 - isoState *oooIsolationState +func (cr *HeadAndOOOChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { + c, it, _, err := cr.chunkOrIterable(meta, false) + return c, it, err } -func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader { - return &OOOHeadChunkReader{ - head: head, - mint: mint, - maxt: maxt, - isoState: isoState, - } +// ChunkOrIterableWithCopy implements ChunkReaderWithCopy. The special Copy +// behaviour is only implemented for the in-order head chunk. +func (cr *HeadAndOOOChunkReader) ChunkOrIterableWithCopy(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { + return cr.chunkOrIterable(meta, true) } -func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { - sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack() - +func (cr *HeadAndOOOChunkReader) chunkOrIterable(meta chunks.Meta, copyLastChunk bool) (chunkenc.Chunk, chunkenc.Iterable, int64, error) { + sid, cid, isOOO := unpackHeadChunkRef(meta.Ref) s := cr.head.series.getByID(sid) // This means that the series has been garbage collected. if s == nil { - return nil, nil, storage.ErrNotFound + return nil, nil, 0, storage.ErrNotFound + } + var isoState *isolationState + if cr.cr != nil { + isoState = cr.cr.isoState } s.Lock() - if s.ooo == nil { - // There is no OOO data for this series. - s.Unlock() - return nil, nil, storage.ErrNotFound - } - mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) - s.Unlock() - if err != nil { - return nil, nil, err - } + defer s.Unlock() - // This means that the query range did not overlap with the requested chunk. - if len(mc.chunkIterables) == 0 { - return nil, nil, storage.ErrNotFound + if meta.Chunk == nil { + c, maxt, err := cr.head.chunkFromSeries(s, cid, isOOO, meta.MinTime, meta.MaxTime, isoState, copyLastChunk) + return c, nil, maxt, err } - - return nil, mc, nil + mm, ok := meta.Chunk.(*multiMeta) + if !ok { // Complete chunk was supplied. + return meta.Chunk, nil, meta.MaxTime, nil + } + // We have a composite meta: construct a composite iterable. 
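+ // For example (illustrative): a multiMeta built from overlapping chunks [5, 7] expands to one iterable per underlying chunk, which mergedOOOChunks then replays merged in timestamp order via ChainSampleIteratorFromIterables.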
+ mc := &mergedOOOChunks{} + for _, m := range mm.metas { + switch { + case m.Chunk != nil: + mc.chunkIterables = append(mc.chunkIterables, m.Chunk) + default: + _, cid, isOOO := unpackHeadChunkRef(m.Ref) + iterable, _, err := cr.head.chunkFromSeries(s, cid, isOOO, m.MinTime, m.MaxTime, isoState, copyLastChunk) + if err != nil { + return nil, nil, 0, fmt.Errorf("invalid head chunk: %w", err) + } + mc.chunkIterables = append(mc.chunkIterables, iterable) + } + } + return nil, mc, meta.MaxTime, nil } -func (cr OOOHeadChunkReader) Close() error { - if cr.isoState != nil { - cr.isoState.Close() +func (cr *HeadAndOOOChunkReader) Close() error { + if cr.cr != nil && cr.cr.isoState != nil { + cr.cr.isoState.Close() + } + if cr.oooIsoState != nil { + cr.oooIsoState.Close() } return nil } type OOOCompactionHead struct { - oooIR *OOOHeadIndexReader + head *Head lastMmapRef chunks.ChunkDiskMapperRef lastWBLFile int postings []storage.SeriesRef @@ -308,6 +304,7 @@ type OOOCompactionHead struct { // on the sample append latency. So call NewOOOCompactionHead only right before compaction. func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, error) { ch := &OOOCompactionHead{ + head: head, chunkRange: head.chunkRange.Load(), mint: math.MaxInt64, maxt: math.MinInt64, @@ -321,15 +318,14 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, ch.lastWBLFile = lastWBLFile } - ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0) + hr := headIndexReader{head: head, mint: ch.mint, maxt: ch.maxt} n, v := index.AllPostingsKey() - - // TODO: verify this gets only ooo samples. - p, err := ch.oooIR.Postings(ctx, n, v) + // TODO: filter to series with OOO samples, before sorting. + p, err := hr.Postings(ctx, n, v) if err != nil { return nil, err } - p = ch.oooIR.SortedPostings(p) + p = hr.SortedPostings(p) var lastSeq, lastOff int for p.Next() { @@ -350,7 +346,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, } var lastMmapRef chunks.ChunkDiskMapperRef - mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) + mmapRefs := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper, head.logger) if len(mmapRefs) == 0 && len(ms.ooo.oooMmappedChunks) > 0 { // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists. mmapRefs = []chunks.ChunkDiskMapperRef{ms.ooo.oooMmappedChunks[len(ms.ooo.oooMmappedChunks)-1].ref} @@ -386,7 +382,7 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) { } func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil + return NewHeadAndOOOChunkReader(ch.head, ch.mint, ch.maxt, nil, nil, ch.lastMmapRef), nil } func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { @@ -412,12 +408,12 @@ func (ch *OOOCompactionHead) Meta() BlockMeta { // Only the method of BlockReader interface are valid for the cloned OOOCompactionHead. 
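// For example (illustrative, assuming the compaction flow): one OOO head spanning several block ranges can be split into per-range clones such as ch.CloneForTimeRange(0, blockEnd-1), each reusing the parent's postings, chunkRange and lastMmapRef while narrowing mint/maxt.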
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead { return &OOOCompactionHead{ - oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0), + head: ch.head, lastMmapRef: ch.lastMmapRef, postings: ch.postings, chunkRange: ch.chunkRange, - mint: ch.mint, - maxt: ch.maxt, + mint: mint, + maxt: maxt, } } @@ -437,7 +433,8 @@ func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader { } func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter { - return ir.ch.oooIR.Symbols() + hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt} + return hr.Symbols() } func (ir *OOOCompactionHeadIndexReader) Postings(_ context.Context, name string, values ...string) (index.Postings, error) { @@ -458,11 +455,28 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P } func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { - return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount) + hr := headIndexReader{head: ir.ch.head, mint: ir.ch.mint, maxt: ir.ch.maxt} + return hr.ShardedPostings(p, shardIndex, shardCount) } func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef) + s := ir.ch.head.series.getByID(chunks.HeadSeriesRef(ref)) + + if s == nil { + ir.ch.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + builder.Assign(s.labels()) + + s.Lock() + defer s.Unlock() + *chks = (*chks)[:0] + + if s.ooo == nil { + return nil + } + + return getOOOSeriesChunks(s, ir.ch.mint, ir.ch.maxt, 0, ir.ch.lastMmapRef, false, chks) } func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { @@ -490,5 +504,91 @@ func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ctx context.Context, posti } func (ir *OOOCompactionHeadIndexReader) Close() error { - return ir.ch.oooIR.Close() + return nil +} + +// HeadAndOOOQuerier queries both the head and the out-of-order head. +type HeadAndOOOQuerier struct { + mint, maxt int64 + head *Head + index IndexReader + chunkr ChunkReader + querier storage.Querier +} + +func NewHeadAndOOOQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.Querier) storage.Querier { + cr := &headChunkReader{ + head: head, + mint: mint, + maxt: maxt, + isoState: head.iso.State(mint, maxt), + } + return &HeadAndOOOQuerier{ + mint: mint, + maxt: maxt, + head: head, + index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), + querier: querier, + } +} + +func (q *HeadAndOOOQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelValues(ctx, name, hints, matchers...) +} + +func (q *HeadAndOOOQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelNames(ctx, hints, matchers...) 
+} + +func (q *HeadAndOOOQuerier) Close() error { + q.chunkr.Close() + return q.querier.Close() +} + +func (q *HeadAndOOOQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + return selectSeriesSet(ctx, sortSeries, hints, matchers, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) +} + +// HeadAndOOOChunkQuerier queries both the head and the out-of-order head. +type HeadAndOOOChunkQuerier struct { + mint, maxt int64 + head *Head + index IndexReader + chunkr ChunkReader + querier storage.ChunkQuerier +} + +func NewHeadAndOOOChunkQuerier(mint, maxt int64, head *Head, oooIsoState *oooIsolationState, querier storage.ChunkQuerier) storage.ChunkQuerier { + cr := &headChunkReader{ + head: head, + mint: mint, + maxt: maxt, + isoState: head.iso.State(mint, maxt), + } + return &HeadAndOOOChunkQuerier{ + mint: mint, + maxt: maxt, + head: head, + index: NewHeadAndOOOIndexReader(head, mint, maxt, oooIsoState.minRef), + chunkr: NewHeadAndOOOChunkReader(head, mint, maxt, cr, oooIsoState, 0), + querier: querier, + } +} + +func (q *HeadAndOOOChunkQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelValues(ctx, name, hints, matchers...) +} + +func (q *HeadAndOOOChunkQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) { + return q.querier.LabelNames(ctx, hints, matchers...) +} + +func (q *HeadAndOOOChunkQuerier) Close() error { + q.chunkr.Close() + return q.querier.Close() +} + +func (q *HeadAndOOOChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { + return selectChunkSeriesSet(ctx, sortSeries, hints, matchers, rangeHeadULID, q.index, q.chunkr, q.head.tombstones, q.mint, q.maxt) } diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go index 7ecd355b5..40e37043b 100644 --- a/tsdb/ooo_head_read_test.go +++ b/tsdb/ooo_head_read_test.go @@ -39,6 +39,11 @@ type chunkInterval struct { maxt int64 } +type expChunk struct { + c chunkInterval + m []chunkInterval +} + // permutateChunkIntervals returns all possible orders of the given chunkIntervals. 
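// For example (illustrative): two intervals A and B yield [[A, B], [B, A]]; three intervals yield all 3! = 6 orderings.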
func permutateChunkIntervals(in []chunkInterval, out [][]chunkInterval, left, right int) [][]chunkInterval { if left == right { @@ -65,7 +70,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { queryMinT int64 queryMaxT int64 inputChunkIntervals []chunkInterval - expChunks []chunkInterval + expChunks []expChunk }{ { name: "Empty result and no error when head is empty", @@ -107,8 +112,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700 // Query Interval [-----------------------------------------------------------] // Chunk 0: [---------------------------------------] - expChunks: []chunkInterval{ - {0, 150, 350}, + expChunks: []expChunk{ + {c: chunkInterval{0, 150, 350}}, }, }, { @@ -121,8 +126,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // ts 0 100 150 200 250 300 350 400 450 500 550 600 650 700 // Query Interval: [---------------------------------------] // Chunk 0: [-----------------------------------------------------------] - expChunks: []chunkInterval{ - {0, 100, 400}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 400}}, }, }, { @@ -142,9 +147,9 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 2: [-------------------] // Chunk 3: [-------------------] // Output Graphically [-----------------------------] [-----------------------------] - expChunks: []chunkInterval{ - {0, 100, 250}, - {1, 500, 650}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 250}, m: []chunkInterval{{0, 100, 200}, {2, 150, 250}}}, + {c: chunkInterval{1, 500, 650}, m: []chunkInterval{{1, 500, 600}, {3, 550, 650}}}, }, }, { @@ -164,8 +169,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 2: [-------------------] // Chunk 3: [------------------] // Output Graphically [------------------------------------------------------------------------------] - expChunks: []chunkInterval{ - {0, 100, 500}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 500}, m: []chunkInterval{{0, 100, 200}, {1, 200, 300}, {2, 300, 400}, {3, 400, 500}}}, }, }, { @@ -185,11 +190,11 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 2: [------------------] // Chunk 3: [------------------] // Output Graphically [------------------][------------------][------------------][------------------] - expChunks: []chunkInterval{ - {0, 100, 199}, - {1, 200, 299}, - {2, 300, 399}, - {3, 400, 499}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 199}}, + {c: chunkInterval{1, 200, 299}}, + {c: chunkInterval{2, 300, 399}}, + {c: chunkInterval{3, 400, 499}}, }, }, { @@ -209,8 +214,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 2: [------------------] // Chunk 3: [------------------] // Output Graphically [-----------------------------------------------] - expChunks: []chunkInterval{ - {0, 100, 350}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 350}, m: []chunkInterval{{0, 100, 200}, {1, 150, 300}, {2, 250, 350}}}, }, }, { @@ -228,8 +233,8 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 1: [-----------------------------] // Chunk 2: [------------------------------] // Output Graphically [-----------------------------------------------------------------------------------------] - expChunks: []chunkInterval{ - {1, 0, 500}, + expChunks: []expChunk{ + {c: chunkInterval{1, 0, 500}, m: []chunkInterval{{1, 0, 200}, {2, 150, 300}, {0, 250, 500}}}, }, }, { @@ -251,9 +256,9 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 3: [-------------------] // Chunk 4: 
[---------------------------------------] // Output Graphically [---------------------------------------] [------------------------------------------------] - expChunks: []chunkInterval{ - {0, 100, 300}, - {4, 600, 850}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 300}, m: []chunkInterval{{0, 100, 300}, {2, 150, 250}}}, + {c: chunkInterval{4, 600, 850}, m: []chunkInterval{{4, 600, 800}, {3, 650, 750}, {1, 770, 850}}}, }, }, { @@ -271,10 +276,10 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { // Chunk 1: [----------] // Chunk 2: [--------] // Output Graphically [-------] [--------] [----------] - expChunks: []chunkInterval{ - {0, 100, 150}, - {1, 300, 350}, - {2, 200, 250}, + expChunks: []expChunk{ + {c: chunkInterval{0, 100, 150}}, + {c: chunkInterval{2, 200, 250}}, + {c: chunkInterval{1, 300, 350}}, }, }, } @@ -305,24 +310,38 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { s1.ooo = &memSeriesOOOFields{} // define our expected chunks, by looking at the expected ChunkIntervals and setting... + // Ref to whatever Ref the chunk has, that we refer to by ID + findID := func(id int) chunks.ChunkRef { + for ref, c := range intervals { + if c.ID == id { + return chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), s1.oooHeadChunkID(ref))) + } + } + return 0 + } var expChunks []chunks.Meta for _, e := range tc.expChunks { - meta := chunks.Meta{ - Chunk: chunkenc.Chunk(nil), - MinTime: e.mint, - MaxTime: e.maxt, - } - - // Ref to whatever Ref the chunk has, that we refer to by ID - for ref, c := range intervals { - if c.ID == e.ID { - meta.Ref = chunks.ChunkRef(chunks.NewHeadChunkRef(chunks.HeadSeriesRef(s1ID), chunks.HeadChunkID(ref))) - break + var chunk chunkenc.Chunk + if len(e.m) > 0 { + mm := &multiMeta{} + for _, x := range e.m { + meta := chunks.Meta{ + MinTime: x.mint, + MaxTime: x.maxt, + Ref: findID(x.ID), + } + mm.metas = append(mm.metas, meta) } + chunk = mm + } + meta := chunks.Meta{ + Chunk: chunk, + MinTime: e.c.mint, + MaxTime: e.c.maxt, + Ref: findID(e.c.ID), } expChunks = append(expChunks, meta) } - slices.SortFunc(expChunks, lessByMinTimeAndMinRef) // We always want the chunks to come back sorted by minTime asc. if headChunk && len(intervals) > 0 { // Put the last interval in the head chunk @@ -341,7 +360,7 @@ func TestOOOHeadIndexReader_Series(t *testing.T) { }) } - ir := NewOOOHeadIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(h, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder @@ -421,17 +440,17 @@ func testOOOHeadChunkReader_LabelValues(t *testing.T, scenario sampleTypeScenari name: "LabelValues calls with ooo head query range not overlapping out-of-order data", queryMinT: 100, queryMaxT: 100, - expValues1: []string{}, - expValues2: []string{}, - expValues3: []string{}, - expValues4: []string{}, + expValues1: []string{"bar1"}, + expValues2: nil, + expValues3: []string{"bar1", "bar2"}, + expValues4: []string{"bar1", "bar2"}, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { // We first want to test using a head index reader that covers the biggest query interval - oh := NewOOOHeadIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) + oh := NewHeadAndOOOIndexReader(head, tc.queryMinT, tc.queryMaxT, 0) matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar1")} values, err := oh.LabelValues(ctx, "foo", matchers...) 
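[Editor's note] findID above reconstructs the packed head chunk reference for each expected chunk. A head chunk ref packs the in-memory series reference into the upper bits of a uint64 and the per-series chunk ID into the lower bits; in tsdb/chunks the split is 5 bytes for the series and 3 bytes for the chunk ID. Read this way, the not-found ref 0x1800000 used a few hunks below is series 1 with the top chunk-ID bit set, which appears to be how this change flags out-of-order chunk IDs — treat that flag bit, like the exact widths, as an assumption of this sketch:

package main

import "fmt"

// Assumed layout, mirroring tsdb/chunks.NewHeadChunkRef: upper 40 bits
// carry the series reference, lower 24 bits the chunk ID.
const chunkIDBits = 24

func newHeadChunkRef(series, chunkID uint64) uint64 {
	return series<<chunkIDBits | chunkID
}

func unpack(ref uint64) (series, chunkID uint64) {
	return ref >> chunkIDBits, ref & (1<<chunkIDBits - 1)
}

func main() {
	ref := newHeadChunkRef(1, 0x800000) // Top chunk-ID bit set: OOO-flagged (assumption).
	fmt.Printf("ref=%#x\n", ref)        // ref=0x1800000, the value probed by the not-found test.
	s, c := unpack(ref)
	fmt.Printf("series=%d chunkID=%#x\n", s, c)
}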
sort.Strings(values) @@ -481,10 +500,10 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { t.Run("Getting a non existing chunk fails with not found error", func(t *testing.T) { db := newTestDBWithOpts(t, opts) - cr := NewOOOHeadChunkReader(db.head, 0, 1000, nil) + cr := NewHeadAndOOOChunkReader(db.head, 0, 1000, nil, nil, 0) defer cr.Close() c, iterable, err := cr.ChunkOrIterable(chunks.Meta{ - Ref: 0x1000000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, + Ref: 0x1800000, Chunk: chunkenc.Chunk(nil), MinTime: 100, MaxTime: 300, }) require.Nil(t, iterable) require.Equal(t, err, fmt.Errorf("not found")) @@ -497,6 +516,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { queryMaxT int64 firstInOrderSampleAt int64 inputSamples []testValue + expSingleChunks bool expChunkError bool expChunksSamples []chunks.SampleSlice }{ @@ -509,7 +529,8 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { {Ts: minutes(30), V: 0}, {Ts: minutes(40), V: 0}, }, - expChunkError: false, + expChunkError: false, + expSingleChunks: true, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 // Query Interval [------------------------------------------------------------------------------------------] // Chunk 0: Current Head [--------] (With 2 samples) @@ -689,7 +710,8 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { {Ts: minutes(40), V: 3}, {Ts: minutes(42), V: 3}, }, - expChunkError: false, + expChunkError: false, + expSingleChunks: true, // ts (in minutes) 0 10 20 30 40 50 60 70 80 90 100 // Query Interval [------------------------------------------------------------------------------------------] // Chunk 0 [-------] @@ -832,21 +854,25 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) { // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. - ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) require.NoError(t, err) require.Equal(t, len(tc.expChunksSamples), len(chks)) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) + cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0) defer cr.Close() for i := 0; i < len(chks); i++ { c, iterable, err := cr.ChunkOrIterable(chks[i]) require.NoError(t, err) - require.Nil(t, c) - - it := iterable.Iterator(nil) + var it chunkenc.Iterator + if tc.expSingleChunks { + it = c.Iterator(nil) + } else { + require.Nil(t, c) + it = iterable.Iterator(nil) + } resultSamples, err := storage.ExpandSamples(it, nil) require.NoError(t, err) requireEqualSamples(t, s1.String(), tc.expChunksSamples[i], resultSamples, true) @@ -997,7 +1023,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( // The Series method populates the chunk metas, taking a copy of the // head OOO chunk if necessary. These are then used by the ChunkReader. 
- ir := NewOOOHeadIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) + ir := NewHeadAndOOOIndexReader(db.head, tc.queryMinT, tc.queryMaxT, 0) var chks []chunks.Meta var b labels.ScratchBuilder err = ir.Series(s1Ref, &b, &chks) @@ -1013,7 +1039,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( } require.NoError(t, app.Commit()) - cr := NewOOOHeadChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil) + cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0) defer cr.Close() for i := 0; i < len(chks); i++ { c, iterable, err := cr.ChunkOrIterable(chks[i]) @@ -1029,94 +1055,6 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding( } } -// TestSortByMinTimeAndMinRef tests that the sort function for chunk metas does sort -// by chunk meta MinTime and in case of same references by the lower reference. -func TestSortByMinTimeAndMinRef(t *testing.T) { - tests := []struct { - name string - input []chunkMetaAndChunkDiskMapperRef - exp []chunkMetaAndChunkDiskMapperRef - }{ - { - name: "chunks are ordered by min time", - input: []chunkMetaAndChunkDiskMapperRef{ - { - meta: chunks.Meta{ - Ref: 0, - MinTime: 0, - }, - ref: chunks.ChunkDiskMapperRef(0), - }, - { - meta: chunks.Meta{ - Ref: 1, - MinTime: 1, - }, - ref: chunks.ChunkDiskMapperRef(1), - }, - }, - exp: []chunkMetaAndChunkDiskMapperRef{ - { - meta: chunks.Meta{ - Ref: 0, - MinTime: 0, - }, - ref: chunks.ChunkDiskMapperRef(0), - }, - { - meta: chunks.Meta{ - Ref: 1, - MinTime: 1, - }, - ref: chunks.ChunkDiskMapperRef(1), - }, - }, - }, - { - name: "if same mintime, lower reference goes first", - input: []chunkMetaAndChunkDiskMapperRef{ - { - meta: chunks.Meta{ - Ref: 10, - MinTime: 0, - }, - ref: chunks.ChunkDiskMapperRef(0), - }, - { - meta: chunks.Meta{ - Ref: 5, - MinTime: 0, - }, - ref: chunks.ChunkDiskMapperRef(1), - }, - }, - exp: []chunkMetaAndChunkDiskMapperRef{ - { - meta: chunks.Meta{ - Ref: 5, - MinTime: 0, - }, - ref: chunks.ChunkDiskMapperRef(1), - }, - { - meta: chunks.Meta{ - Ref: 10, - MinTime: 0, - }, - ref: chunks.ChunkDiskMapperRef(0), - }, - }, - }, - } - - for _, tc := range tests { - t.Run(fmt.Sprintf("name=%s", tc.name), func(t *testing.T) { - slices.SortFunc(tc.input, refLessByMinTimeAndMinRef) - require.Equal(t, tc.exp, tc.input) - }) - } -} - // TestSortMetaByMinTimeAndMinRef tests that the sort function for chunk metas does sort // by chunk meta MinTime and in case of same references by the lower reference. func TestSortMetaByMinTimeAndMinRef(t *testing.T) { diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go index 27ff4048b..d3cd5f601 100644 --- a/tsdb/ooo_head_test.go +++ b/tsdb/ooo_head_test.go @@ -14,8 +14,14 @@ package tsdb import ( + "math" "testing" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/tsdbutil" + "github.com/stretchr/testify/require" ) @@ -52,7 +58,7 @@ func TestOOOInsert(t *testing.T) { chunk := NewOOOChunk() chunk.samples = makeEvenSampleSlice(numPreExisting) newSample := samplify(valOdd(insertPos)) - chunk.Insert(newSample.t, newSample.f) + chunk.Insert(newSample.t, newSample.f, nil, nil) var expSamples []sample // Our expected new samples slice, will be first the original samples. 
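[Editor's note] The extra nil, nil arguments in the test hunks above reflect Insert's widened signature, which now also carries histogram and float-histogram values. The contract the surrounding tests rely on is unchanged: samples stay sorted by timestamp, and an exact duplicate timestamp is rejected without modifying the slice. A hedged sketch of that contract, float case only:

package main

import (
	"fmt"
	"sort"
)

type sample struct {
	t int64
	f float64
}

type oooChunk struct{ samples []sample }

// insert keeps samples sorted by timestamp and rejects duplicates, the
// behavior TestOOOInsert and TestOOOInsertDuplicate exercise (sketch only;
// the real OOOChunk.Insert also takes histogram arguments).
func (c *oooChunk) insert(t int64, f float64) bool {
	i := sort.Search(len(c.samples), func(i int) bool { return c.samples[i].t >= t })
	if i < len(c.samples) && c.samples[i].t == t {
		return false // Duplicate timestamp: no change.
	}
	c.samples = append(c.samples, sample{})
	copy(c.samples[i+1:], c.samples[i:]) // Shift the tail right by one.
	c.samples[i] = sample{t: t, f: f}
	return true
}

func main() {
	c := &oooChunk{}
	for _, t := range []int64{30, 10, 20, 20} {
		fmt.Println(t, c.insert(t, float64(t)))
	}
	fmt.Println(c.samples) // [{10 10} {20 20} {30 30}]
}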
@@ -83,7 +89,7 @@ func TestOOOInsertDuplicate(t *testing.T) { dupSample := chunk.samples[dupPos] dupSample.f = 0.123 - ok := chunk.Insert(dupSample.t, dupSample.f) + ok := chunk.Insert(dupSample.t, dupSample.f, nil, nil) expSamples := makeEvenSampleSlice(num) // We expect no change. require.False(t, ok) @@ -91,3 +97,136 @@ func TestOOOInsertDuplicate(t *testing.T) { } } } + +type chunkVerify struct { + encoding chunkenc.Encoding + minTime int64 + maxTime int64 +} + +func TestOOOChunks_ToEncodedChunks(t *testing.T) { + h1 := tsdbutil.GenerateTestHistogram(1) + // Make h2 appendable but with more buckets, to trigger recoding. + h2 := h1.Copy() + h2.PositiveSpans = append(h2.PositiveSpans, histogram.Span{Offset: 1, Length: 1}) + h2.PositiveBuckets = append(h2.PositiveBuckets, 12) + + testCases := map[string]struct { + samples []sample + expectedCounterResets []histogram.CounterResetHint + expectedChunks []chunkVerify + }{ + "empty": { + samples: []sample{}, + }, + "has floats": { + samples: []sample{ + {t: 1000, f: 43.0}, + {t: 1100, f: 42.0}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1100}, + }, + }, + "mix of floats and histograms": { + samples: []sample{ + {t: 1000, f: 43.0}, + {t: 1100, h: h1}, + {t: 1200, f: 42.0}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.UnknownCounterReset, histogram.UnknownCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncXOR, minTime: 1000, maxTime: 1000}, + {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, + {encoding: chunkenc.EncXOR, minTime: 1200, maxTime: 1200}, + }, + }, + "has a counter reset": { + samples: []sample{ + {t: 1000, h: h2}, + {t: 1100, h: h1}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.CounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncHistogram, minTime: 1000, maxTime: 1000}, + {encoding: chunkenc.EncHistogram, minTime: 1100, maxTime: 1100}, + }, + }, + "has a recoded histogram": { // Regression test for wrong minT, maxT in histogram recoding. + samples: []sample{ + {t: 0, h: h1}, + {t: 1, h: h2}, + }, + expectedCounterResets: []histogram.CounterResetHint{histogram.UnknownCounterReset, histogram.NotCounterReset}, + expectedChunks: []chunkVerify{ + {encoding: chunkenc.EncHistogram, minTime: 0, maxTime: 1}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Sanity check. 
+ require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets") + + oooChunk := OOOChunk{} + for _, s := range tc.samples { + switch s.Type() { + case chunkenc.ValFloat: + oooChunk.Insert(s.t, s.f, nil, nil) + case chunkenc.ValHistogram: + oooChunk.Insert(s.t, 0, s.h.Copy(), nil) + case chunkenc.ValFloatHistogram: + oooChunk.Insert(s.t, 0, nil, s.fh.Copy()) + default: + t.Fatalf("unexpected sample type %d", s.Type()) + } + } + + chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64) + require.NoError(t, err) + require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks") + sampleIndex := 0 + for i, c := range chunks { + require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i) + require.Equal(t, tc.expectedChunks[i].minTime, c.minTime, "chunk %d minTime", i) + require.Equal(t, tc.expectedChunks[i].maxTime, c.maxTime, "chunk %d maxTime", i) + samples, err := storage.ExpandSamples(c.chunk.Iterator(nil), newSample) + require.GreaterOrEqual(t, len(tc.samples)-sampleIndex, len(samples), "too many samples in chunk %d expected less than %d", i, len(tc.samples)-sampleIndex) + require.NoError(t, err) + if len(samples) == 0 { + // Ignore empty chunks. + continue + } + switch c.chunk.Encoding() { + case chunkenc.EncXOR: + for j, s := range samples { + require.Equal(t, chunkenc.ValFloat, s.Type()) + // XOR chunks don't have counter reset hints, so we shouldn't expect anything else than UnknownCounterReset. + require.Equal(t, histogram.UnknownCounterReset, tc.expectedCounterResets[sampleIndex+j], "sample reset hint %d", sampleIndex+j) + require.Equal(t, tc.samples[sampleIndex+j].f, s.F(), "sample %d", sampleIndex+j) + } + case chunkenc.EncHistogram: + for j, s := range samples { + require.Equal(t, chunkenc.ValHistogram, s.Type()) + require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.H().CounterResetHint, "sample reset hint %d", sampleIndex+j) + compareTo := tc.samples[sampleIndex+j].h.Copy() + compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j] + require.Equal(t, compareTo, s.H().Compact(0), "sample %d", sampleIndex+j) + } + case chunkenc.EncFloatHistogram: + for j, s := range samples { + require.Equal(t, chunkenc.ValFloatHistogram, s.Type()) + require.Equal(t, tc.expectedCounterResets[sampleIndex+j], s.FH().CounterResetHint, "sample reset hint %d", sampleIndex+j) + compareTo := tc.samples[sampleIndex+j].fh.Copy() + compareTo.CounterResetHint = tc.expectedCounterResets[sampleIndex+j] + require.Equal(t, compareTo, s.FH().Compact(0), "sample %d", sampleIndex+j) + } + } + sampleIndex += len(samples) + } + require.Equal(t, len(tc.samples), sampleIndex, "number of samples") + }) + } +} diff --git a/tsdb/querier.go b/tsdb/querier.go index 910c2d7fc..912c95032 100644 --- a/tsdb/querier.go +++ b/tsdb/querier.go @@ -115,20 +115,24 @@ func NewBlockQuerier(b BlockReader, mint, maxt int64) (storage.Querier, error) { } func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.SeriesSet { - mint := q.mint - maxt := q.maxt + return selectSeriesSet(ctx, sortSeries, hints, ms, q.index, q.chunks, q.tombstones, q.mint, q.maxt) +} + +func selectSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher, + index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64, +) storage.SeriesSet { disableTrimming := false sharded := hints != nil && hints.ShardCount > 0 - p, 
err := PostingsForMatchers(ctx, q.index, ms...) + p, err := PostingsForMatchers(ctx, index, ms...) if err != nil { return storage.ErrSeriesSet(err) } if sharded { - p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) + p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) } if sortSeries { - p = q.index.SortedPostings(p) + p = index.SortedPostings(p) } if hints != nil { @@ -137,11 +141,11 @@ func (q *blockQuerier) Select(ctx context.Context, sortSeries bool, hints *stora disableTrimming = hints.DisableTrimming if hints.Func == "series" { // When you're only looking up metadata (for example series API), you don't need to load any chunks. - return newBlockSeriesSet(q.index, newNopChunkReader(), q.tombstones, p, mint, maxt, disableTrimming) + return newBlockSeriesSet(index, newNopChunkReader(), tombstones, p, mint, maxt, disableTrimming) } } - return newBlockSeriesSet(q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return newBlockSeriesSet(index, chunks, tombstones, p, mint, maxt, disableTrimming) } // blockChunkQuerier provides chunk querying access to a single block database. @@ -159,8 +163,12 @@ func NewBlockChunkQuerier(b BlockReader, mint, maxt int64) (storage.ChunkQuerier } func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet { - mint := q.mint - maxt := q.maxt + return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt) +} + +func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher, + blockID ulid.ULID, index IndexReader, chunks ChunkReader, tombstones tombstones.Reader, mint, maxt int64, +) storage.ChunkSeriesSet { disableTrimming := false sharded := hints != nil && hints.ShardCount > 0 @@ -169,17 +177,17 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints * maxt = hints.End disableTrimming = hints.DisableTrimming } - p, err := PostingsForMatchers(ctx, q.index, ms...) + p, err := PostingsForMatchers(ctx, index, ms...) if err != nil { return storage.ErrChunkSeriesSet(err) } if sharded { - p = q.index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) + p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount) } if sortSeries { - p = q.index.SortedPostings(p) + p = index.SortedPostings(p) } - return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) + return NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming) } // PostingsForMatchers assembles a single postings iterator against the index reader @@ -633,14 +641,16 @@ func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { } } - hcr, ok := p.cr.(*headChunkReader) + hcr, ok := p.cr.(ChunkReaderWithCopy) var iterable chunkenc.Iterable if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 { - // ChunkWithCopy will copy the head chunk. + // ChunkOrIterableWithCopy will copy the head chunk, if it can. var maxt int64 - p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta) - // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here. - p.currMeta.MaxTime = maxt + p.currMeta.Chunk, iterable, maxt, p.err = hcr.ChunkOrIterableWithCopy(p.currMeta) + if p.currMeta.Chunk != nil { + // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here. 
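[Editor's note] The corrected comment above (ValNone rather than ValNoneNone) explains why populateChunksFromIterable always opens a chunk for the first sample: the previous value type starts out as ValNone, which never equals a real sample type, so the first comparison always triggers a cut. A self-contained sketch of that run-splitting logic, with illustrative types rather than chunkenc's:

package main

import "fmt"

type valueType int

const (
	valNone valueType = iota // Zero value: guarantees the first sample starts a run.
	valFloat
	valHistogram
)

type sample struct {
	t   int64
	typ valueType
}

// splitByType cuts a new run whenever the value type changes, mirroring
// the currentValueType != prevValueType check in the hunk above.
func splitByType(samples []sample) [][]sample {
	var runs [][]sample
	prev := valNone
	for _, s := range samples {
		if s.typ != prev { // Always true for the first sample: valNone != any real type.
			runs = append(runs, nil)
			prev = s.typ
		}
		runs[len(runs)-1] = append(runs[len(runs)-1], s)
	}
	return runs
}

func main() {
	fmt.Println(splitByType([]sample{
		{1000, valFloat}, {1100, valHistogram}, {1200, valFloat},
	})) // Three runs: float, histogram, float — like the mixed-samples test case.
}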
+ p.currMeta.MaxTime = maxt + } } else { p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta) } @@ -962,7 +972,7 @@ func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { // Check if the encoding has changed (i.e. we need to create a new // chunk as chunks can't have multiple encoding types). // For the first sample, the following condition will always be true as - // ValNoneNone != ValFloat | ValHistogram | ValFloatHistogram. + // ValNone != ValFloat | ValHistogram | ValFloatHistogram. if currentValueType != prevValueType { if prevValueType != chunkenc.ValNone { p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) diff --git a/tsdb/querier_bench_test.go b/tsdb/querier_bench_test.go index 9a8230242..43accc253 100644 --- a/tsdb/querier_bench_test.go +++ b/tsdb/querier_bench_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/index" "github.com/stretchr/testify/require" @@ -254,56 +255,98 @@ func BenchmarkMergedStringIter(b *testing.B) { b.ReportAllocs() } -func BenchmarkQuerierSelect(b *testing.B) { - opts := DefaultHeadOptions() - opts.ChunkRange = 1000 - opts.ChunkDirRoot = b.TempDir() - h, err := NewHead(nil, nil, nil, nil, opts, nil) +func createHeadForBenchmarkSelect(b *testing.B, numSeries int, addSeries func(app storage.Appender, i int)) (*Head, *DB) { + dir := b.TempDir() + opts := DefaultOptions() + opts.OutOfOrderCapMax = 255 + opts.OutOfOrderTimeWindow = 1000 + db, err := Open(dir, nil, nil, opts, nil) require.NoError(b, err) - defer h.Close() + b.Cleanup(func() { + require.NoError(b, db.Close()) + }) + h := db.Head() + app := h.Appender(context.Background()) - numSeries := 1000000 for i := 0; i < numSeries; i++ { - app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0) + addSeries(app, i) } require.NoError(b, app.Commit()) + return h, db +} - bench := func(b *testing.B, br BlockReader, sorted bool) { - matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar") - for s := 1; s <= numSeries; s *= 10 { - b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) { - q, err := NewBlockQuerier(br, 0, int64(s-1)) - require.NoError(b, err) +func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, sorted bool) { + matcher := labels.MustNewMatcher(labels.MatchEqual, "foo", "bar") + b.ResetTimer() + for s := 1; s <= numSeries; s *= 10 { + b.Run(fmt.Sprintf("%dof%d", s, numSeries), func(b *testing.B) { + q, err := queryable.Querier(0, int64(s-1)) + require.NoError(b, err) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ss := q.Select(context.Background(), sorted, nil, matcher) - for ss.Next() { - } - require.NoError(b, ss.Err()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + ss := q.Select(context.Background(), sorted, nil, matcher) + for ss.Next() { } - q.Close() - }) - } + require.NoError(b, ss.Err()) + } + q.Close() + }) } +} + +func BenchmarkQuerierSelect(b *testing.B) { + numSeries := 1000000 + h, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) { + _, err := app.Append(0, labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)), int64(i), 0) + if err != nil { + b.Fatal(err) + } + }) b.Run("Head", func(b *testing.B) { - bench(b, h, false) + benchmarkSelect(b, db, numSeries, false) }) b.Run("SortedHead", func(b 
*testing.B) { - bench(b, h, true) + benchmarkSelect(b, db, numSeries, true) }) - tmpdir := b.TempDir() - - blockdir := createBlockFromHead(b, tmpdir, h) - block, err := OpenBlock(nil, blockdir, nil) - require.NoError(b, err) - defer func() { - require.NoError(b, block.Close()) - }() - b.Run("Block", func(b *testing.B) { - bench(b, block, false) + tmpdir := b.TempDir() + + blockdir := createBlockFromHead(b, tmpdir, h) + block, err := OpenBlock(nil, blockdir, nil) + require.NoError(b, err) + defer func() { + require.NoError(b, block.Close()) + }() + + benchmarkSelect(b, (*queryableBlock)(block), numSeries, false) + }) +} + +// Type wrapper to let a Block be a Queryable in benchmarkSelect(). +type queryableBlock Block + +func (pb *queryableBlock) Querier(mint, maxt int64) (storage.Querier, error) { + return NewBlockQuerier((*Block)(pb), mint, maxt) +} + +func BenchmarkQuerierSelectWithOutOfOrder(b *testing.B) { + numSeries := 1000000 + _, db := createHeadForBenchmarkSelect(b, numSeries, func(app storage.Appender, i int) { + l := labels.FromStrings("foo", "bar", "i", fmt.Sprintf("%d%s", i, postingsBenchSuffix)) + ref, err := app.Append(0, l, int64(i+1), 0) + if err != nil { + b.Fatal(err) + } + _, err = app.Append(ref, l, int64(i), 1) // Out of order sample + if err != nil { + b.Fatal(err) + } + }) + + b.Run("Head", func(b *testing.B) { + benchmarkSelect(b, db, numSeries, false) }) } diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go index ffdf8dc02..50525f65f 100644 --- a/tsdb/querier_test.go +++ b/tsdb/querier_test.go @@ -3169,12 +3169,11 @@ func BenchmarkQueries(b *testing.B) { qHead, err := NewBlockQuerier(NewRangeHead(head, 1, nSamples), 1, nSamples) require.NoError(b, err) - qOOOHead, err := NewBlockQuerier(NewOOORangeHead(head, 1, nSamples, 0), 1, nSamples) - require.NoError(b, err) + isoState := head.oooIso.TrackReadAfter(0) + qOOOHead := NewHeadAndOOOQuerier(1, nSamples, head, isoState, qHead) queryTypes = append(queryTypes, qt{ - fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), - storage.NewMergeQuerier([]storage.Querier{qHead, qOOOHead}, nil, storage.ChainedSeriesMerge), + fmt.Sprintf("_Head_oooPercent:%d", oooPercentage), qOOOHead, }) } diff --git a/tsdb/wlog/watcher.go b/tsdb/wlog/watcher.go index bc7a144e6..ac5041e87 100644 --- a/tsdb/wlog/watcher.go +++ b/tsdb/wlog/watcher.go @@ -20,7 +20,6 @@ import ( "math" "os" "path/filepath" - "slices" "strconv" "strings" "time" @@ -59,15 +58,16 @@ type WriteTo interface { StoreSeries([]record.RefSeries, int) StoreMetadata([]record.RefMetadata) - // Next two methods are intended for garbage-collection: first we call - // UpdateSeriesSegment on all current series + // UpdateSeriesSegment and SeriesReset are intended for + // garbage-collection: + // First we call UpdateSeriesSegment on all current series. UpdateSeriesSegment([]record.RefSeries, int) - // Then SeriesReset is called to allow the deletion - // of all series created in a segment lower than the argument. + // Then SeriesReset is called to allow the deletion of all series + // created in a segment lower than the argument. SeriesReset(int) } -// Used to notify the watcher that data has been written so that it can read. +// WriteNotified notifies the watcher that data has been written so that it can read. type WriteNotified interface { Notify() } @@ -265,9 +265,9 @@ func (w *Watcher) loop() { // Run the watcher, which will tail the WAL until the quit channel is closed // or an error case is hit. 
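[Editor's note] The queryableBlock wrapper above uses a standard Go idiom: to attach an interface method to an existing type without modifying it, define a named type with the same underlying type and convert the pointer, which costs nothing at runtime. A minimal sketch of the idiom; Block and Querier here are stand-ins, not the tsdb/storage types:

package main

import "fmt"

// Block is an existing type that lacks the method we need.
type Block struct{ name string }

type Querier interface{ Query() string }

// queryableBlock has Block's exact memory layout, so a *Block converts to
// *queryableBlock for free: same pointer, new method set.
type queryableBlock Block

func (b *queryableBlock) Query() string { return "querying " + b.name }

func main() {
	blk := &Block{name: "01H-example"}
	var q Querier = (*queryableBlock)(blk)
	fmt.Println(q.Query())
}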
func (w *Watcher) Run() error { - _, lastSegment, err := w.firstAndLast() + _, lastSegment, err := Segments(w.walDir) if err != nil { - return fmt.Errorf("wal.Segments: %w", err) + return fmt.Errorf("Segments: %w", err) } // We want to ensure this is false across iterations since @@ -318,57 +318,20 @@ func (w *Watcher) Run() error { // findSegmentForIndex finds the first segment greater than or equal to index. func (w *Watcher) findSegmentForIndex(index int) (int, error) { - refs, err := w.segments(w.walDir) + refs, err := listSegments(w.walDir) if err != nil { return -1, err } for _, r := range refs { - if r >= index { - return r, nil + if r.index >= index { + return r.index, nil } } return -1, errors.New("failed to find segment for index") } -func (w *Watcher) firstAndLast() (int, int, error) { - refs, err := w.segments(w.walDir) - if err != nil { - return -1, -1, err - } - - if len(refs) == 0 { - return -1, -1, nil - } - return refs[0], refs[len(refs)-1], nil -} - -// Copied from tsdb/wlog/wlog.go so we do not have to open a WAL. -// Plan is to move WAL watcher to TSDB and dedupe these implementations. -func (w *Watcher) segments(dir string) ([]int, error) { - files, err := os.ReadDir(dir) - if err != nil { - return nil, err - } - - var refs []int - for _, f := range files { - k, err := strconv.Atoi(f.Name()) - if err != nil { - continue - } - refs = append(refs, k) - } - slices.Sort(refs) - for i := 0; i < len(refs)-1; i++ { - if refs[i]+1 != refs[i+1] { - return nil, errors.New("segments are not sequential") - } - } - return refs, nil -} - func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, size int64) error { err := w.readSegment(r, segmentNum, tail) @@ -447,35 +410,17 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Currently doing a garbage collect, try again later. } + // if a newer segment is produced, read the current one until the end and move on. case <-segmentTicker.C: - _, last, err := w.firstAndLast() + _, last, err := Segments(w.walDir) if err != nil { - return fmt.Errorf("segments: %w", err) + return fmt.Errorf("Segments: %w", err) } - // Check if new segments exists. - if last <= segmentNum { - continue + if last > segmentNum { + return w.readAndHandleError(reader, segmentNum, tail, size) } - err = w.readSegment(reader, segmentNum, tail) - - // Ignore errors reading to end of segment whilst replaying the WAL. - if !tail { - switch { - case err != nil && !errors.Is(err, io.EOF): - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) - case reader.Offset() != size: - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) - } - return nil - } - - // Otherwise, when we are tailing, non-EOFs are fatal. 
- if err != nil && !errors.Is(err, io.EOF) { - return err - } - - return nil + continue // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: @@ -484,7 +429,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { if err != nil { return err } - // still want to reset the ticker so we don't read too often + // reset the ticker so we don't read too often readTicker.Reset(readTimeout) case <-w.readNotify: @@ -492,7 +437,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { if err != nil { return err } - // still want to reset the ticker so we don't read too often + // reset the ticker so we don't read too often readTicker.Reset(readTimeout) } } @@ -628,6 +573,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { w.writer.AppendHistograms(histogramsToSend) histogramsToSend = histogramsToSend[:0] } + case record.FloatHistogramSamples: // Skip if experimental "histograms over remote write" is not enabled. if !w.sendHistograms { @@ -657,7 +603,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { } case record.Metadata: - if !w.sendMetadata || !tail { + if !w.sendMetadata { break } meta, err := dec.Metadata(rec, metadata[:0]) @@ -666,11 +612,13 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { return err } w.writer.StoreMetadata(meta) - case record.Tombstones: - default: + case record.Unknown: // Could be corruption, or reading from a WAL from a newer Prometheus. w.recordDecodeFailsMetric.Inc() + + default: + // We're not interested in other types of records. } } if err := r.Err(); err != nil { @@ -699,14 +647,12 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error } w.writer.UpdateSeriesSegment(series, segmentNum) - // Ignore these; we're only interested in series. - case record.Samples: - case record.Exemplars: - case record.Tombstones: - - default: + case record.Unknown: // Could be corruption, or reading from a WAL from a newer Prometheus. w.recordDecodeFailsMetric.Inc() + + default: + // We're only interested in series. } } if err := r.Err(); err != nil { @@ -731,29 +677,30 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } // Ensure we read the whole contents of every segment in the checkpoint dir. 
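[Editor's note] In the readCheckpoint hunk that follows, the deferred sr.Close() inside the loop is replaced by an explicit Close per iteration. Defers only run when the enclosing function returns, so the old code held every checkpoint segment open at once. A small sketch of the corrected pattern, under illustrative names (processAll, process):

package main

import (
	"fmt"
	"os"
)

// processAll closes each file as soon as it is processed; a defer inside
// the loop would pile up open files until processAll returns.
func processAll(paths []string) error {
	for _, p := range paths {
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		err = process(f)
		f.Close() // Explicit per-iteration close, as in the watcher hunk below.
		if err != nil {
			return err
		}
	}
	return nil
}

func process(f *os.File) error {
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	fmt.Println(fi.Name(), fi.Size())
	return nil
}

func main() {
	if err := processAll([]string{os.Args[0]}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}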
- segs, err := w.segments(checkpointDir) + segs, err := listSegments(checkpointDir) if err != nil { return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) } - for _, seg := range segs { - size, err := getSegmentSize(checkpointDir, seg) + for _, segRef := range segs { + size, err := getSegmentSize(checkpointDir, segRef.index) if err != nil { return fmt.Errorf("getSegmentSize: %w", err) } - sr, err := OpenReadSegment(SegmentName(checkpointDir, seg)) + sr, err := OpenReadSegment(SegmentName(checkpointDir, segRef.index)) if err != nil { return fmt.Errorf("unable to open segment: %w", err) } - defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) { + err = readFn(w, r, index, false) + sr.Close() + if err != nil && !errors.Is(err, io.EOF) { return fmt.Errorf("readSegment: %w", err) } if r.Offset() != size { - return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset()) + return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, segRef.index, size, r.Offset()) } } diff --git a/tsdb/wlog/watcher_test.go b/tsdb/wlog/watcher_test.go index 70c84b4ff..dc0314e8c 100644 --- a/tsdb/wlog/watcher_test.go +++ b/tsdb/wlog/watcher_test.go @@ -230,11 +230,11 @@ func TestTailSamples(t *testing.T) { for i := first; i <= last; i++ { segment, err := OpenReadSegment(SegmentName(watcher.walDir, i)) require.NoError(t, err) - defer segment.Close() reader := NewLiveReader(nil, NewLiveReaderMetrics(nil), segment) // Use tail true so we can ensure we got the right number of samples. watcher.readSegment(reader, i, true) + require.NoError(t, segment.Close()) } expectedSeries := seriesCount diff --git a/tsdb/wlog/wlog.go b/tsdb/wlog/wlog.go index 668fbb5fb..b14521f35 100644 --- a/tsdb/wlog/wlog.go +++ b/tsdb/wlog/wlog.go @@ -38,8 +38,8 @@ import ( ) const ( - DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB - pageSize = 32 * 1024 // 32KB + DefaultSegmentSize = 128 * 1024 * 1024 // DefaultSegmentSize is 128 MB. + pageSize = 32 * 1024 // pageSize is 32KB. recordHeaderSize = 7 WblDirName = "wbl" ) @@ -612,16 +612,16 @@ func (w *WL) setSegment(segment *Segment) error { // flushPage writes the new contents of the page to disk. If no more records will fit into // the page, the remaining bytes will be set to zero and a new page will be started. -// If clear is true, this is enforced regardless of how many bytes are left in the page. -func (w *WL) flushPage(clear bool) error { +// If forceClear is true, this is enforced regardless of how many bytes are left in the page. +func (w *WL) flushPage(forceClear bool) error { w.metrics.pageFlushes.Inc() p := w.page - clear = clear || p.full() + shouldClear := forceClear || p.full() // No more data will fit into the page or an implicit clear. // Enqueue and clear it. - if clear { + if shouldClear { p.alloc = pageSize // Write till end of page. } @@ -633,7 +633,7 @@ func (w *WL) flushPage(clear bool) error { p.flushed += n // We flushed an entire page, prepare a new one. 
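[Editor's note] The flushPage hunk above stops reassigning its parameter and splits the names: the input keeps one meaning (forceClear) while the derived decision gets its own (shouldClear). Plausibly the rename also avoids reusing clear, a predeclared builtin since Go 1.21, though that motivation is an inference. A before/after sketch:

package main

import "fmt"

// flushPageOld shows the old shape: the parameter is reassigned, and its
// name also collides with the builtin clear() in modern Go.
func flushPageOld(clear, full bool) bool {
	clear = clear || full // Legal, but the caller's original intent is lost.
	return clear
}

// flushPageNew keeps the input (forceClear) separate from the derived
// decision (shouldClear), as in the wlog.go hunk above.
func flushPageNew(forceClear, full bool) bool {
	shouldClear := forceClear || full
	return shouldClear
}

func main() {
	fmt.Println(flushPageOld(false, true), flushPageNew(false, true)) // true true
}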
- if clear { + if shouldClear { p.reset() w.donePages++ w.metrics.pageCompletions.Inc() diff --git a/util/almost/almost.go b/util/almost/almost.go index 34f1290a5..5f866b89b 100644 --- a/util/almost/almost.go +++ b/util/almost/almost.go @@ -13,13 +13,23 @@ package almost -import "math" +import ( + "math" + + "github.com/prometheus/prometheus/model/value" +) var minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64. // Equal returns true if a and b differ by less than their sum -// multiplied by epsilon. +// multiplied by epsilon, or if both are StaleNaN, or if both are any other NaN. func Equal(a, b, epsilon float64) bool { + // StaleNaN is a special value that is used as staleness maker, and + // we don't want it to compare equal to any other NaN. + if value.IsStaleNaN(a) || value.IsStaleNaN(b) { + return value.IsStaleNaN(a) && value.IsStaleNaN(b) + } + // NaN has no equality but for testing we still want to know whether both values // are NaN. if math.IsNaN(a) && math.IsNaN(b) { diff --git a/util/almost/almost_test.go b/util/almost/almost_test.go new file mode 100644 index 000000000..fba37f13f --- /dev/null +++ b/util/almost/almost_test.go @@ -0,0 +1,50 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package almost + +import ( + "fmt" + "math" + "testing" + + "github.com/prometheus/prometheus/model/value" +) + +func TestEqual(t *testing.T) { + staleNaN := math.Float64frombits(value.StaleNaN) + tests := []struct { + a float64 + b float64 + epsilon float64 + want bool + }{ + {0.0, 0.0, 0.0, true}, + {0.0, 0.1, 0.0, false}, + {1.0, 1.1, 0.1, true}, + {-1.0, -1.1, 0.1, true}, + {math.MaxFloat64, math.MaxFloat64 / 10, 0.1, false}, + {1.0, math.NaN(), 0.1, false}, + {math.NaN(), math.NaN(), 0.1, true}, + {math.NaN(), staleNaN, 0.1, false}, + {staleNaN, math.NaN(), 0.1, false}, + {staleNaN, staleNaN, 0.1, true}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%v,%v,%v", tt.a, tt.b, tt.epsilon), func(t *testing.T) { + if got := Equal(tt.a, tt.b, tt.epsilon); got != tt.want { + t.Errorf("Equal(%v,%v,%v) = %v, want %v", tt.a, tt.b, tt.epsilon, got, tt.want) + } + }) + } +} diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go index bc5d76db4..b0272b7fe 100644 --- a/util/annotations/annotations.go +++ b/util/annotations/annotations.go @@ -174,7 +174,7 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error { } } -// NewInvalidQuantileWarning is used when the user specifies an invalid ratio +// NewInvalidRatioWarning is used when the user specifies an invalid ratio // value, i.e. a float that is outside the range [-1, 1] or NaN. 
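[Editor's note] The almost.Equal change above carves staleness markers out of general NaN equality: a StaleNaN compares equal only to another StaleNaN, while ordinary NaNs still equal each other. A self-contained sketch of the resulting decision ladder; the StaleNaN bit pattern matches model/value, but the epsilon arm is reconstructed here and should be read as an approximation:

package main

import (
	"fmt"
	"math"
)

// Bit pattern of Prometheus's staleness marker (model/value.StaleNaN).
const staleNaNBits = 0x7ff0000000000002

var staleNaN = math.Float64frombits(staleNaNBits)

func isStaleNaN(f float64) bool { return math.Float64bits(f) == staleNaNBits }

var minNormal = math.Float64frombits(0x0010000000000000)

// equal mirrors the patched almost.Equal: staleness markers only equal each
// other, ordinary NaNs only equal ordinary NaNs, then relative epsilon.
func equal(a, b, epsilon float64) bool {
	if isStaleNaN(a) || isStaleNaN(b) {
		return isStaleNaN(a) && isStaleNaN(b)
	}
	if math.IsNaN(a) && math.IsNaN(b) {
		return true
	}
	if a == b {
		return true
	}
	absSum := math.Abs(a) + math.Abs(b)
	diff := math.Abs(a - b)
	if a == 0 || b == 0 || absSum < minNormal {
		return diff < epsilon*minNormal
	}
	return diff/math.Min(absSum, math.MaxFloat64) < epsilon
}

func main() {
	fmt.Println(equal(math.NaN(), math.NaN(), 0.1)) // true: both ordinary NaN
	fmt.Println(equal(staleNaN, math.NaN(), 0.1))   // false: marker vs ordinary NaN
	fmt.Println(equal(staleNaN, staleNaN, 0.1))     // true: both markers
}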
func NewInvalidRatioWarning(q, to float64, pos posrange.PositionRange) error { return annoErr{ diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go index 9de2bb8d4..6964952af 100644 --- a/util/documentcli/documentcli.go +++ b/util/documentcli/documentcli.go @@ -23,6 +23,7 @@ import ( "bytes" "fmt" "io" + "reflect" "strings" "github.com/alecthomas/kingpin/v2" @@ -75,6 +76,16 @@ func createFlagRow(flag *kingpin.FlagModel) []string { name = fmt.Sprintf(`-%c, --%s`, flag.Short, flag.Name) } + valueType := reflect.TypeOf(flag.Value) + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + } + if valueType.Kind() == reflect.Struct { + if _, found := valueType.FieldByName("slice"); found { + name = fmt.Sprintf(`%s ...`, name) + } + } + return []string{name, strings.ReplaceAll(flag.Help, "|", `\|`), defaultVal} } diff --git a/util/netconnlimit/netconnlimit.go b/util/netconnlimit/netconnlimit.go new file mode 100644 index 000000000..3bdd805b8 --- /dev/null +++ b/util/netconnlimit/netconnlimit.go @@ -0,0 +1,97 @@ +// Copyright 2024 The Prometheus Authors +// Based on golang.org/x/net/netutil: +// Copyright 2013 The Go Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package netconnlimit provides network utility functions for limiting +// simultaneous connections across multiple listeners. +package netconnlimit + +import ( + "net" + "sync" +) + +// NewSharedSemaphore creates and returns a new semaphore channel that can be used +// to limit the number of simultaneous connections across multiple listeners. +func NewSharedSemaphore(n int) chan struct{} { + return make(chan struct{}, n) +} + +// SharedLimitListener returns a listener that accepts at most n simultaneous +// connections across multiple listeners using the provided shared semaphore. +func SharedLimitListener(l net.Listener, sem chan struct{}) net.Listener { + return &sharedLimitListener{ + Listener: l, + sem: sem, + done: make(chan struct{}), + } +} + +type sharedLimitListener struct { + net.Listener + sem chan struct{} + closeOnce sync.Once // Ensures the done chan is only closed once. + done chan struct{} // No values sent; closed when Close is called. +} + +// Acquire acquires the shared semaphore. Returns true if successfully +// acquired, false if the listener is closed and the semaphore is not +// acquired. 
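[Editor's note] The acquire/release pair below implements a counting semaphore as a buffered channel, with a done channel so a closed listener stops blocking acquirers. The same mechanism shown in isolation, capped at two concurrent units; a usage sketch under that assumption, not the package's API:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	// A buffered channel is the semaphore: capacity = max simultaneous
	// connections, shared by every goroutine (or listener) holding it.
	sem := make(chan struct{}, 2) // As NewSharedSemaphore(2) would return.

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // Acquire: blocks once two slots are taken.
			defer func() { <-sem }() // Release on the way out.
			fmt.Println("conn", id, "active")
			time.Sleep(50 * time.Millisecond) // Simulate handling a connection.
		}(i)
	}
	wg.Wait()
}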
+func (l *sharedLimitListener) acquire() bool { + select { + case <-l.done: + return false + case l.sem <- struct{}{}: + return true + } +} + +func (l *sharedLimitListener) release() { <-l.sem } + +func (l *sharedLimitListener) Accept() (net.Conn, error) { + if !l.acquire() { + for { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + c.Close() + } + } + + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &sharedLimitListenerConn{Conn: c, release: l.release}, nil +} + +func (l *sharedLimitListener) Close() error { + err := l.Listener.Close() + l.closeOnce.Do(func() { close(l.done) }) + return err +} + +type sharedLimitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *sharedLimitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/util/netconnlimit/netconnlimit_test.go b/util/netconnlimit/netconnlimit_test.go new file mode 100644 index 000000000..e4d490420 --- /dev/null +++ b/util/netconnlimit/netconnlimit_test.go @@ -0,0 +1,124 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package netconnlimit + +import ( + "io" + "net" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestSharedLimitListenerConcurrency(t *testing.T) { + testCases := []struct { + name string + semCapacity int + connCount int + expected int // Expected number of connections processed simultaneously. + }{ + { + name: "Single connection allowed", + semCapacity: 1, + connCount: 3, + expected: 1, + }, + { + name: "Two connections allowed", + semCapacity: 2, + connCount: 3, + expected: 2, + }, + { + name: "Three connections allowed", + semCapacity: 3, + connCount: 3, + expected: 3, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sem := NewSharedSemaphore(tc.semCapacity) + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err, "failed to create listener") + defer listener.Close() + + limitedListener := SharedLimitListener(listener, sem) + + var wg sync.WaitGroup + var activeConnCount int64 + var mu sync.Mutex + + wg.Add(tc.connCount) + + // Accept connections. + for i := 0; i < tc.connCount; i++ { + go func() { + defer wg.Done() + + conn, err := limitedListener.Accept() + require.NoError(t, err, "failed to accept connection") + defer conn.Close() + + // Simulate work and track the active connection count. + mu.Lock() + activeConnCount++ + require.LessOrEqual(t, activeConnCount, int64(tc.expected), "too many simultaneous connections") + mu.Unlock() + + time.Sleep(100 * time.Millisecond) + + mu.Lock() + activeConnCount-- + mu.Unlock() + }() + } + + // Create clients that attempt to connect to the listener. 
+ for i := 0; i < tc.connCount; i++ { + go func() { + conn, err := net.Dial("tcp", listener.Addr().String()) + require.NoError(t, err, "failed to connect to listener") + defer conn.Close() + _, _ = io.WriteString(conn, "hello") + }() + } + + wg.Wait() + + // Ensure all connections are released and semaphore is empty. + require.Empty(t, sem) + }) + } +} + +func TestSharedLimitListenerClose(t *testing.T) { + sem := NewSharedSemaphore(2) + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err, "failed to create listener") + + limitedListener := SharedLimitListener(listener, sem) + + // Close the listener and ensure it does not accept new connections. + err = limitedListener.Close() + require.NoError(t, err, "failed to close listener") + + conn, err := limitedListener.Accept() + require.Error(t, err, "expected error on accept after listener closed") + if conn != nil { + conn.Close() + } +} diff --git a/util/testutil/cmp.go b/util/testutil/cmp.go index 370d191f3..24d39d514 100644 --- a/util/testutil/cmp.go +++ b/util/testutil/cmp.go @@ -23,13 +23,14 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -// Replacement for require.Equal using go-cmp adapted for Prometheus data structures, instead of DeepEqual. +// RequireEqual is a replacement for require.Equal using go-cmp adapted for +// Prometheus data structures, instead of DeepEqual. func RequireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) { t.Helper() RequireEqualWithOptions(t, expected, actual, nil, msgAndArgs...) } -// As RequireEqual but allows extra cmp.Options. +// RequireEqualWithOptions works like RequireEqual but allows extra cmp.Options. func RequireEqualWithOptions(t testing.TB, expected, actual interface{}, extra []cmp.Option, msgAndArgs ...interface{}) { t.Helper() options := append([]cmp.Option{cmp.Comparer(labels.Equal)}, extra...) diff --git a/util/testutil/directory.go b/util/testutil/directory.go index 8aa17702d..38dabd183 100644 --- a/util/testutil/directory.go +++ b/util/testutil/directory.go @@ -155,7 +155,7 @@ func DirHash(t *testing.T, path string) []byte { modTime, err := info.ModTime().GobEncode() require.NoError(t, err) - _, err = io.WriteString(hash, string(modTime)) + _, err = hash.Write(modTime) require.NoError(t, err) return nil }) diff --git a/web/api/v1/api_test.go b/web/api/v1/api_test.go index ba38ddc97..ef9d53dd9 100644 --- a/web/api/v1/api_test.go +++ b/web/api/v1/api_test.go @@ -59,16 +59,19 @@ import ( "github.com/prometheus/prometheus/util/teststorage" ) -var testEngine = promql.NewEngine(promql.EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10000, - Timeout: 100 * time.Second, - NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 }, - EnableAtModifier: true, - EnableNegativeOffset: true, - EnablePerStepStats: true, -}) +func testEngine(t *testing.T) *promql.Engine { + t.Helper() + return promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{ + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + NoStepSubqueryIntervalFn: func(int64) int64 { return 60 * 1000 }, + EnableAtModifier: true, + EnableNegativeOffset: true, + EnablePerStepStats: true, + }) +} // testMetaStore satisfies the scrape.MetricMetadataStore interface. // It is used to inject specific metadata as part of a test case. 
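[Editor's note] The testEngine change above turns a shared package-level engine into a per-test constructor, so each test receives a fresh, t-scoped instance with a natural place to hang cleanup. The general shape of that refactor, with stand-in types and names (this sketch belongs in a _test.go file):

package fixture_test

import "testing"

// engine stands in for promql.Engine (illustrative only).
type engine struct{ maxSamples int }

// newTestEngine gives each test its own engine plus a cleanup hook, the
// same shape as testEngine(t) in the api_test.go hunk above.
func newTestEngine(t *testing.T) *engine {
	t.Helper()
	e := &engine{maxSamples: 10000}
	t.Cleanup(func() { /* release per-test resources here */ })
	return e
}

func TestQuery(t *testing.T) {
	e := newTestEngine(t)
	if e.maxSamples != 10000 {
		t.Fatalf("unexpected maxSamples: %d", e.maxSamples)
	}
}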
@@ -306,8 +309,7 @@ func (m *rulesRetrieverMock) CreateRuleGroups() { MaxSamples: 10, Timeout: 100 * time.Second, } - - engine := promql.NewEngine(engineOpts) + engine := promqltest.NewTestEngineWithOpts(m.testing, engineOpts) opts := &rules.ManagerOptions{ QueryFunc: rules.EngineQueryFunc(engine, storage), Appendable: storage, @@ -431,9 +433,10 @@ func TestEndpoints(t *testing.T) { now := time.Now() + ng := testEngine(t) + t.Run("local", func(t *testing.T) { - algr := rulesRetrieverMock{} - algr.testing = t + algr := rulesRetrieverMock{testing: t} algr.CreateAlertingRules() algr.CreateRuleGroups() @@ -445,7 +448,7 @@ func TestEndpoints(t *testing.T) { api := &API{ Queryable: storage, - QueryEngine: testEngine, + QueryEngine: ng, ExemplarQueryable: storage.ExemplarQueryable(), targetRetriever: testTargetRetriever.toFactory(), alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(), @@ -496,8 +499,7 @@ func TestEndpoints(t *testing.T) { }) require.NoError(t, err) - algr := rulesRetrieverMock{} - algr.testing = t + algr := rulesRetrieverMock{testing: t} algr.CreateAlertingRules() algr.CreateRuleGroups() @@ -509,7 +511,7 @@ func TestEndpoints(t *testing.T) { api := &API{ Queryable: remote, - QueryEngine: testEngine, + QueryEngine: ng, ExemplarQueryable: storage.ExemplarQueryable(), targetRetriever: testTargetRetriever.toFactory(), alertmanagerRetriever: testAlertmanagerRetriever{}.toFactory(), @@ -651,7 +653,7 @@ func TestQueryExemplars(t *testing.T) { api := &API{ Queryable: storage, - QueryEngine: testEngine, + QueryEngine: testEngine(t), ExemplarQueryable: storage.ExemplarQueryable(), } @@ -870,7 +872,7 @@ func TestStats(t *testing.T) { api := &API{ Queryable: storage, - QueryEngine: testEngine, + QueryEngine: testEngine(t), now: func() time.Time { return time.Unix(123, 0) }, @@ -1074,6 +1076,9 @@ func setupRemote(s storage.Storage) *httptest.Server { } } + w.Header().Set("Content-Type", "application/x-protobuf") + w.Header().Set("Content-Encoding", "snappy") + if err := remote.EncodeReadResponse(&resp, w); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return diff --git a/web/api/v1/errors_test.go b/web/api/v1/errors_test.go index 99ef81018..7e1fc09d8 100644 --- a/web/api/v1/errors_test.go +++ b/web/api/v1/errors_test.go @@ -32,6 +32,7 @@ import ( "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" @@ -86,7 +87,7 @@ func TestApiStatusCodes(t *testing.T) { "error from seriesset": errorTestQueryable{q: errorTestQuerier{s: errorTestSeriesSet{err: tc.err}}}, } { t.Run(fmt.Sprintf("%s/%s", name, k), func(t *testing.T) { - r := createPrometheusAPI(q) + r := createPrometheusAPI(t, q) rec := httptest.NewRecorder() req := httptest.NewRequest(http.MethodGet, "/api/v1/query?query=up", nil) @@ -100,8 +101,10 @@ func TestApiStatusCodes(t *testing.T) { } } -func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { - engine := promql.NewEngine(promql.EngineOpts{ +func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route.Router { + t.Helper() + + engine := promqltest.NewTestEngineWithOpts(t, promql.EngineOpts{ Logger: log.NewNopLogger(), Reg: nil, ActiveQueryTracker: nil, diff --git a/web/ui/module/codemirror-promql/package.json b/web/ui/module/codemirror-promql/package.json 
index 0cce9ca46..e8fe7c180 100644
--- a/web/ui/module/codemirror-promql/package.json
+++ b/web/ui/module/codemirror-promql/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/codemirror-promql",
-  "version": "0.53.1",
+  "version": "0.54.1",
   "description": "a CodeMirror mode for the PromQL language",
   "types": "dist/esm/index.d.ts",
   "module": "dist/esm/index.js",
@@ -29,7 +29,7 @@
   },
   "homepage": "https://github.com/prometheus/prometheus/blob/main/web/ui/module/codemirror-promql/README.md",
   "dependencies": {
-    "@prometheus-io/lezer-promql": "0.53.1",
+    "@prometheus-io/lezer-promql": "0.54.1",
     "lru-cache": "^7.18.3"
   },
   "devDependencies": {
@@ -37,10 +37,10 @@
     "@codemirror/language": "^6.10.2",
     "@codemirror/lint": "^6.8.1",
     "@codemirror/state": "^6.3.3",
-    "@codemirror/view": "^6.28.3",
+    "@codemirror/view": "^6.29.1",
     "@lezer/common": "^1.2.1",
     "@lezer/highlight": "^1.2.0",
-    "@lezer/lr": "^1.4.1",
+    "@lezer/lr": "^1.4.2",
     "eslint-plugin-prettier": "^5.1.3",
     "isomorphic-fetch": "^3.0.0",
     "nock": "^13.5.4"
diff --git a/web/ui/module/lezer-promql/package.json b/web/ui/module/lezer-promql/package.json
index 44a7a9259..8093936dc 100644
--- a/web/ui/module/lezer-promql/package.json
+++ b/web/ui/module/lezer-promql/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/lezer-promql",
-  "version": "0.53.1",
+  "version": "0.54.1",
   "description": "lezer-based PromQL grammar",
   "main": "dist/index.cjs",
   "type": "module",
@@ -32,7 +32,7 @@
   "devDependencies": {
     "@lezer/generator": "^1.7.1",
     "@lezer/highlight": "^1.2.0",
-    "@lezer/lr": "^1.4.1",
+    "@lezer/lr": "^1.4.2",
     "@rollup/plugin-node-resolve": "^15.2.3"
   },
   "peerDependencies": {
diff --git a/web/ui/package-lock.json b/web/ui/package-lock.json
index 26cadf8bc..00736ad63 100644
--- a/web/ui/package-lock.json
+++ b/web/ui/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "prometheus-io",
-  "version": "0.53.1",
+  "version": "0.54.1",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "prometheus-io",
-      "version": "0.53.1",
+      "version": "0.54.1",
       "workspaces": [
         "mantine-ui",
         "module/*"
@@ -171,10 +171,10 @@
     },
     "module/codemirror-promql": {
       "name": "@prometheus-io/codemirror-promql",
-      "version": "0.53.1",
+      "version": "0.54.1",
       "license": "Apache-2.0",
       "dependencies": {
-        "@prometheus-io/lezer-promql": "0.53.1",
+        "@prometheus-io/lezer-promql": "0.54.1",
         "lru-cache": "^7.18.3"
       },
       "devDependencies": {
@@ -182,10 +182,10 @@
         "@codemirror/language": "^6.10.2",
         "@codemirror/lint": "^6.8.1",
         "@codemirror/state": "^6.3.3",
-        "@codemirror/view": "^6.28.3",
+        "@codemirror/view": "^6.29.1",
         "@lezer/common": "^1.2.1",
         "@lezer/highlight": "^1.2.0",
-        "@lezer/lr": "^1.4.1",
+        "@lezer/lr": "^1.4.2",
         "eslint-plugin-prettier": "^5.1.3",
         "isomorphic-fetch": "^3.0.0",
         "nock": "^13.5.4"
@@ -204,12 +204,12 @@
     },
     "module/lezer-promql": {
       "name": "@prometheus-io/lezer-promql",
-      "version": "0.53.1",
+      "version": "0.54.1",
       "license": "Apache-2.0",
       "devDependencies": {
        "@lezer/generator": "^1.7.1",
        "@lezer/highlight": "^1.2.0",
-       "@lezer/lr": "^1.4.1",
+       "@lezer/lr": "^1.4.2",
        "@rollup/plugin-node-resolve": "^15.2.3"
       },
       "peerDependencies": {
diff --git a/web/ui/package.json b/web/ui/package.json
index dfa933f26..ff5493e43 100644
--- a/web/ui/package.json
+++ b/web/ui/package.json
@@ -1,7 +1,7 @@
 {
   "name": "prometheus-io",
   "description": "Monorepo for the Prometheus UI",
-  "version": "0.53.1",
+  "version": "0.54.1",
   "private": true,
   "scripts": {
     "build": "bash build_ui.sh --all",
diff --git a/web/ui/react-app/package-lock.json b/web/ui/react-app/package-lock.json
index 1482466e7..d456ca1f0 100644
--- a/web/ui/react-app/package-lock.json
+++ b/web/ui/react-app/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "@prometheus-io/app",
-  "version": "0.53.1",
+  "version": "0.54.1",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "@prometheus-io/app",
-      "version": "0.53.1",
+      "version": "0.54.1",
       "dependencies": {
         "@codemirror/autocomplete": "^6.17.0",
         "@codemirror/commands": "^6.6.0",
@@ -14,17 +14,17 @@
         "@codemirror/lint": "^6.8.1",
         "@codemirror/search": "^6.5.6",
         "@codemirror/state": "^6.3.3",
-        "@codemirror/view": "^6.28.3",
+        "@codemirror/view": "^6.29.1",
         "@forevolve/bootstrap-dark": "^4.0.2",
         "@fortawesome/fontawesome-svg-core": "6.5.2",
         "@fortawesome/free-solid-svg-icons": "6.5.2",
         "@fortawesome/react-fontawesome": "0.2.0",
         "@lezer/common": "^1.2.1",
         "@lezer/highlight": "^1.2.0",
-        "@lezer/lr": "^1.4.1",
+        "@lezer/lr": "^1.4.2",
         "@nexucis/fuzzy": "^0.4.1",
         "@nexucis/kvsearch": "^0.8.1",
-        "@prometheus-io/codemirror-promql": "0.53.1",
+        "@prometheus-io/codemirror-promql": "0.54.1",
         "bootstrap": "^4.6.2",
         "css.escape": "^1.5.1",
         "downshift": "^9.0.6",
@@ -2307,9 +2307,9 @@
       "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A=="
     },
     "node_modules/@codemirror/view": {
-      "version": "6.28.4",
-      "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.4.tgz",
-      "integrity": "sha512-QScv95fiviSQ/CaVGflxAvvvDy/9wi0RFyDl4LkHHWiMr/UPebyuTspmYSeN5Nx6eujcPYwsQzA6ZIZucKZVHQ==",
+      "version": "6.33.0",
+      "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.33.0.tgz",
+      "integrity": "sha512-AroaR3BvnjRW8fiZBalAaK+ZzB5usGgI014YKElYZvQdNH5ZIidHlO+cyf/2rWzyBFRkvG6VhiXeAEbC53P2YQ==",
       "license": "MIT",
       "dependencies": {
         "@codemirror/state": "^6.4.0",
@@ -4193,9 +4193,9 @@
       }
     },
     "node_modules/@lezer/lr": {
-      "version": "1.4.1",
-      "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz",
-      "integrity": "sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==",
+      "version": "1.4.2",
+      "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz",
+      "integrity": "sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==",
      "license": "MIT",
       "dependencies": {
         "@lezer/common": "^1.0.0"
@@ -4341,12 +4341,12 @@
       }
     },
     "node_modules/@prometheus-io/codemirror-promql": {
-      "version": "0.53.1",
-      "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.53.1.tgz",
-      "integrity": "sha512-+5DjIyrnbtnfen6WczbRvWOhhUxD1szVsL0xKPxc2HflehVRNplZJ9LDM+4XW/wPi+DySEFMq7E7mHX7Sxv9Cw==",
+      "version": "0.54.1",
+      "resolved": "https://registry.npmjs.org/@prometheus-io/codemirror-promql/-/codemirror-promql-0.54.1.tgz",
+      "integrity": "sha512-CkU5d+Nhbj+VjTYSlicIcFeL3KUYyEco/VHK+qM4TXgPQJxP04MCi642UVgLeuy9exThkCObj5oDJcApSNmxBw==",
       "license": "Apache-2.0",
       "dependencies": {
-        "@prometheus-io/lezer-promql": "0.53.1",
+        "@prometheus-io/lezer-promql": "0.54.1",
         "lru-cache": "^7.18.3"
       },
       "engines": {
@@ -4362,9 +4362,9 @@
       }
     },
     "node_modules/@prometheus-io/lezer-promql": {
-      "version": "0.53.1",
-      "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.53.1.tgz",
-      "integrity": "sha512-gQJm88qAEntXrervWuWl6auuljcSg4CXbrbe/xfRsqxB1C0aWYbx9ZaUdBGf2vaulZjZtxi1e2tIlkRmjfDi7g==",
+      "version": "0.54.1",
+      "resolved": "https://registry.npmjs.org/@prometheus-io/lezer-promql/-/lezer-promql-0.54.1.tgz",
+      "integrity": "sha512-+QdeoN/PttM1iBeRtwSQWoaDIwnIgT9oIueTbAlvL01WM2eluD8j9vNiD0oJFzbcZ5clxwhvMP54InIt3vJaMg==",
       "license": "Apache-2.0",
       "peerDependencies": {
         "@lezer/highlight": "^1.1.2",
diff --git a/web/ui/react-app/package.json b/web/ui/react-app/package.json
index 977eb51d2..c194e8335 100644
--- a/web/ui/react-app/package.json
+++ b/web/ui/react-app/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@prometheus-io/app",
-  "version": "0.53.1",
+  "version": "0.54.1",
   "private": true,
   "dependencies": {
     "@codemirror/autocomplete": "^6.17.0",
@@ -9,17 +9,17 @@
     "@codemirror/lint": "^6.8.1",
     "@codemirror/search": "^6.5.6",
     "@codemirror/state": "^6.3.3",
-    "@codemirror/view": "^6.28.3",
+    "@codemirror/view": "^6.29.1",
     "@forevolve/bootstrap-dark": "^4.0.2",
     "@fortawesome/fontawesome-svg-core": "6.5.2",
     "@fortawesome/free-solid-svg-icons": "6.5.2",
     "@fortawesome/react-fontawesome": "0.2.0",
     "@lezer/common": "^1.2.1",
     "@lezer/highlight": "^1.2.0",
-    "@lezer/lr": "^1.4.1",
+    "@lezer/lr": "^1.4.2",
     "@nexucis/fuzzy": "^0.4.1",
     "@nexucis/kvsearch": "^0.8.1",
-    "@prometheus-io/codemirror-promql": "0.53.1",
+    "@prometheus-io/codemirror-promql": "0.54.1",
     "bootstrap": "^4.6.2",
     "css.escape": "^1.5.1",
     "downshift": "^9.0.6",
diff --git a/web/web.go b/web/web.go
index 37a7d71bb..fb8d3608d 100644
--- a/web/web.go
+++ b/web/web.go
@@ -49,7 +49,6 @@ import (
 	toolkit_web "github.com/prometheus/exporter-toolkit/web"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
 	"go.uber.org/atomic"
-	"golang.org/x/net/netutil"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/notifier"
@@ -59,6 +58,7 @@ import (
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/template"
 	"github.com/prometheus/prometheus/util/httputil"
+	"github.com/prometheus/prometheus/util/netconnlimit"
 	api_v1 "github.com/prometheus/prometheus/web/api/v1"
 	"github.com/prometheus/prometheus/web/ui"
 )
@@ -244,7 +244,7 @@ type Options struct {
 	Version *PrometheusVersion
 	Flags   map[string]string
 
-	ListenAddress  string
+	ListenAddresses []string
 	CORSOrigin     *regexp.Regexp
 	ReadTimeout    time.Duration
 	MaxConnections int
@@ -335,7 +335,7 @@ func New(logger log.Logger, o *Options) *Handler {
 		},
 		o.Flags,
 		api_v1.GlobalURLOptions{
-			ListenAddress: o.ListenAddress,
+			ListenAddress: o.ListenAddresses[0],
 			Host:          o.ExternalURL.Host,
 			Scheme:        o.ExternalURL.Scheme,
 		},
@@ -495,14 +495,14 @@ func New(logger log.Logger, o *Options) *Handler {
 
 	router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
-		fmt.Fprintf(w, o.AppName+" is Healthy.\n")
+		fmt.Fprintf(w, "%s is Healthy.\n", o.AppName)
 	})
 	router.Head("/-/healthy", func(w http.ResponseWriter, _ *http.Request) {
 		w.WriteHeader(http.StatusOK)
 	})
 	router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
-		fmt.Fprintf(w, o.AppName+" is Ready.\n")
+		fmt.Fprintf(w, "%s is Ready.\n", o.AppName)
 	}))
 	router.Head("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
@@ -580,15 +580,29 @@ func (h *Handler) Reload() <-chan chan error {
 	return h.reloadCh
 }
 
-// Listener creates the TCP listener for web requests.
-func (h *Handler) Listener() (net.Listener, error) {
-	level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)
+// Listeners creates the TCP listeners for web requests.
+func (h *Handler) Listeners() ([]net.Listener, error) {
+	var listeners []net.Listener
+	sem := netconnlimit.NewSharedSemaphore(h.options.MaxConnections)
+	for _, address := range h.options.ListenAddresses {
+		listener, err := h.Listener(address, sem)
+		if err != nil {
+			return listeners, err
+		}
+		listeners = append(listeners, listener)
+	}
+	return listeners, nil
+}
 
-	listener, err := net.Listen("tcp", h.options.ListenAddress)
+// Listener creates the TCP listener for web requests.
+func (h *Handler) Listener(address string, sem chan struct{}) (net.Listener, error) {
+	level.Info(h.logger).Log("msg", "Start listening for connections", "address", address)
+
+	listener, err := net.Listen("tcp", address)
 	if err != nil {
 		return listener, err
 	}
-	listener = netutil.LimitListener(listener, h.options.MaxConnections)
+	listener = netconnlimit.SharedLimitListener(listener, sem)
 
 	// Monitor incoming connections with conntrack.
 	listener = conntrack.NewListener(listener,
@@ -599,10 +613,10 @@ func (h *Handler) Listener() (net.Listener, error) {
 }
 
 // Run serves the HTTP endpoints.
-func (h *Handler) Run(ctx context.Context, listener net.Listener, webConfig string) error {
-	if listener == nil {
+func (h *Handler) Run(ctx context.Context, listeners []net.Listener, webConfig string) error {
+	if len(listeners) == 0 {
 		var err error
-		listener, err = h.Listener()
+		listeners, err = h.Listeners()
 		if err != nil {
 			return err
 		}
@@ -637,7 +651,7 @@ func (h *Handler) Run(ctx context.Context, listener net.Listener, webConfig string) error {
 
 	errCh := make(chan error, 1)
 	go func() {
-		errCh <- toolkit_web.Serve(listener, httpSrv, &toolkit_web.FlagConfig{WebConfigFile: &webConfig}, h.logger)
+		errCh <- toolkit_web.ServeMultiple(listeners, httpSrv, &toolkit_web.FlagConfig{WebConfigFile: &webConfig}, h.logger)
 	}()
 
 	select {
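The listener rework above hinges on the new util/netconnlimit package, whose implementation is not part of this diff. The old netutil.LimitListener caps each listener independently, which would multiply the effective limit once several listen addresses exist; sharing one semaphore keeps MaxConnections as a single cap across all of them. A minimal sketch of the idea, assuming the semaphore is a plain buffered channel — only the two function names are taken from the call sites above, and the bodies are modeled on golang.org/x/net/netutil rather than copied from the real package:

// Illustrative sketch of a shared-semaphore connection limiter; the actual
// util/netconnlimit implementation may differ.
package netconnlimit

import (
	"net"
	"sync"
)

// NewSharedSemaphore returns a semaphore sized to n concurrent connections,
// intended to be shared by every listener the handler creates.
func NewSharedSemaphore(n int) chan struct{} {
	return make(chan struct{}, n)
}

// SharedLimitListener wraps l so that Accept blocks once all slots in sem
// are taken, counting connections across every listener sharing sem.
func SharedLimitListener(l net.Listener, sem chan struct{}) net.Listener {
	return &sharedLimitListener{Listener: l, sem: sem}
}

type sharedLimitListener struct {
	net.Listener
	sem chan struct{}
}

func (l *sharedLimitListener) Accept() (net.Conn, error) {
	l.sem <- struct{}{} // acquire a slot; blocks when the shared cap is reached
	c, err := l.Listener.Accept()
	if err != nil {
		<-l.sem // release the slot on a failed accept
		return nil, err
	}
	return &sharedLimitConn{Conn: c, sem: l.sem}, nil
}

type sharedLimitConn struct {
	net.Conn
	sem  chan struct{}
	once sync.Once
}

func (c *sharedLimitConn) Close() error {
	err := c.Conn.Close()
	c.once.Do(func() { <-c.sem }) // release exactly once, even on double Close
	return err
}

A buffered channel keeps Accept cheap (acquiring is a single channel send), and the sync.Once guards against a repeated Close releasing two slots.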
diff --git a/web/web_test.go b/web/web_test.go
index e1fa66fa8..b660746b1 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -73,19 +73,19 @@ func TestReadyAndHealthy(t *testing.T) {
 	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
 
 	opts := &Options{
-		ListenAddress:  port,
-		ReadTimeout:    30 * time.Second,
-		MaxConnections: 512,
-		Context:        nil,
-		Storage:        nil,
-		LocalStorage:   &dbAdapter{db},
-		TSDBDir:        dbDir,
-		QueryEngine:    nil,
-		ScrapeManager:  &scrape.Manager{},
-		RuleManager:    &rules.Manager{},
-		Notifier:       nil,
-		RoutePrefix:    "/",
-		EnableAdminAPI: true,
+		ListenAddresses: []string{port},
+		ReadTimeout:     30 * time.Second,
+		MaxConnections:  512,
+		Context:         nil,
+		Storage:         nil,
+		LocalStorage:    &dbAdapter{db},
+		TSDBDir:         dbDir,
+		QueryEngine:     nil,
+		ScrapeManager:   &scrape.Manager{},
+		RuleManager:     &rules.Manager{},
+		Notifier:        nil,
+		RoutePrefix:     "/",
+		EnableAdminAPI:  true,
 		ExternalURL: &url.URL{
 			Scheme: "http",
 			Host:   "localhost" + port,
@@ -101,9 +101,9 @@ func TestReadyAndHealthy(t *testing.T) {
 	webHandler.config = &config.Config{}
 	webHandler.notifier = &notifier.Manager{}
 
-	l, err := webHandler.Listener()
+	l, err := webHandler.Listeners()
 	if err != nil {
-		panic(fmt.Sprintf("Unable to start web listener: %s", err))
+		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
@@ -198,19 +198,19 @@ func TestRoutePrefix(t *testing.T) {
 	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
 
 	opts := &Options{
-		ListenAddress:  port,
-		ReadTimeout:    30 * time.Second,
-		MaxConnections: 512,
-		Context:        nil,
-		TSDBDir:        dbDir,
-		LocalStorage:   &dbAdapter{db},
-		Storage:        nil,
-		QueryEngine:    nil,
-		ScrapeManager:  nil,
-		RuleManager:    nil,
-		Notifier:       nil,
-		RoutePrefix:    "/prometheus",
-		EnableAdminAPI: true,
+		ListenAddresses: []string{port},
+		ReadTimeout:     30 * time.Second,
+		MaxConnections:  512,
+		Context:         nil,
+		TSDBDir:         dbDir,
+		LocalStorage:    &dbAdapter{db},
+		Storage:         nil,
+		QueryEngine:     nil,
+		ScrapeManager:   nil,
+		RuleManager:     nil,
+		Notifier:        nil,
+		RoutePrefix:     "/prometheus",
+		EnableAdminAPI:  true,
 		ExternalURL: &url.URL{
 			Host:   "localhost.localdomain" + port,
 			Scheme: "http",
@@ -220,9 +220,9 @@ func TestRoutePrefix(t *testing.T) {
 	opts.Flags = map[string]string{}
 
 	webHandler := New(nil, opts)
-	l, err := webHandler.Listener()
+	l, err := webHandler.Listeners()
 	if err != nil {
-		panic(fmt.Sprintf("Unable to start web listener: %s", err))
+		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -299,8 +299,8 @@ func TestDebugHandler(t *testing.T) {
 		{"/foo", "/bar/debug/pprof/goroutine", 404},
 	} {
 		opts := &Options{
-			RoutePrefix:   tc.prefix,
-			ListenAddress: "somehost:9090",
+			RoutePrefix:     tc.prefix,
+			ListenAddresses: []string{"somehost:9090"},
 			ExternalURL: &url.URL{
 				Host:   "localhost.localdomain:9090",
 				Scheme: "http",
@@ -324,8 +324,8 @@ func TestDebugHandler(t *testing.T) {
 func TestHTTPMetrics(t *testing.T) {
 	t.Parallel()
 	handler := New(nil, &Options{
-		RoutePrefix:   "/",
-		ListenAddress: "somehost:9090",
+		RoutePrefix:     "/",
+		ListenAddresses: []string{"somehost:9090"},
 		ExternalURL: &url.URL{
 			Host:   "localhost.localdomain:9090",
 			Scheme: "http",
@@ -381,18 +381,18 @@ func TestShutdownWithStaleConnection(t *testing.T) {
 	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
 
 	opts := &Options{
-		ListenAddress:  port,
-		ReadTimeout:    timeout,
-		MaxConnections: 512,
-		Context:        nil,
-		Storage:        nil,
-		LocalStorage:   &dbAdapter{db},
-		TSDBDir:        dbDir,
-		QueryEngine:    nil,
-		ScrapeManager:  &scrape.Manager{},
-		RuleManager:    &rules.Manager{},
-		Notifier:       nil,
-		RoutePrefix:    "/",
+		ListenAddresses: []string{port},
+		ReadTimeout:     timeout,
+		MaxConnections:  512,
+		Context:         nil,
+		Storage:         nil,
+		LocalStorage:    &dbAdapter{db},
+		TSDBDir:         dbDir,
+		QueryEngine:     nil,
+		ScrapeManager:   &scrape.Manager{},
+		RuleManager:     &rules.Manager{},
+		Notifier:        nil,
+		RoutePrefix:     "/",
 		ExternalURL: &url.URL{
 			Scheme: "http",
 			Host:   "localhost" + port,
@@ -408,9 +408,9 @@ func TestShutdownWithStaleConnection(t *testing.T) {
 	webHandler.config = &config.Config{}
 	webHandler.notifier = &notifier.Manager{}
 
-	l, err := webHandler.Listener()
+	l, err := webHandler.Listeners()
 	if err != nil {
-		panic(fmt.Sprintf("Unable to start web listener: %s", err))
+		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
 	}
 
 	closed := make(chan struct{})
@@ -448,7 +448,7 @@ func TestHandleMultipleQuitRequests(t *testing.T) {
 	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
 
 	opts := &Options{
-		ListenAddress:   port,
+		ListenAddresses: []string{port},
 		MaxConnections:  512,
 		EnableLifecycle: true,
 		RoutePrefix:     "/",
@@ -461,9 +461,9 @@ func TestHandleMultipleQuitRequests(t *testing.T) {
 	webHandler := New(nil, opts)
 	webHandler.config = &config.Config{}
 	webHandler.notifier = &notifier.Manager{}
-	l, err := webHandler.Listener()
+	l, err := webHandler.Listeners()
 	if err != nil {
-		panic(fmt.Sprintf("Unable to start web listener: %s", err))
+		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	closed := make(chan struct{})
@@ -513,17 +513,17 @@ func TestAgentAPIEndPoints(t *testing.T) {
 	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
 
 	opts := &Options{
-		ListenAddress:  port,
-		ReadTimeout:    30 * time.Second,
-		MaxConnections: 512,
-		Context:        nil,
-		Storage:        nil,
-		QueryEngine:    nil,
-		ScrapeManager:  &scrape.Manager{},
-		RuleManager:    &rules.Manager{},
-		Notifier:       nil,
-		RoutePrefix:    "/",
-		EnableAdminAPI: true,
+		ListenAddresses: []string{port},
+		ReadTimeout:     30 * time.Second,
+		MaxConnections:  512,
+		Context:         nil,
+		Storage:         nil,
+		QueryEngine:     nil,
+		ScrapeManager:   &scrape.Manager{},
+		RuleManager:     &rules.Manager{},
+		Notifier:        nil,
+		RoutePrefix:     "/",
+		EnableAdminAPI:  true,
 		ExternalURL: &url.URL{
 			Scheme: "http",
 			Host:   "localhost" + port,
@@ -540,9 +540,9 @@ func TestAgentAPIEndPoints(t *testing.T) {
 	webHandler.SetReady(true)
 	webHandler.config = &config.Config{}
 	webHandler.notifier = &notifier.Manager{}
-	l, err := webHandler.Listener()
+	l, err := webHandler.Listeners()
 	if err != nil {
-		panic(fmt.Sprintf("Unable to start web listener: %s", err))
+		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
@@ -628,3 +628,83 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
 	require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
 	require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
 }
+
+func TestMultipleListenAddresses(t *testing.T) {
+	t.Parallel()
+
+	dbDir := t.TempDir()
+
+	db, err := tsdb.Open(dbDir, nil, nil, nil, nil)
+	require.NoError(t, err)
+	t.Cleanup(func() {
+		require.NoError(t, db.Close())
+	})
+
+	// Create multiple ports for testing multiple ListenAddresses.
+	port1 := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
+	port2 := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
+
+	opts := &Options{
+		ListenAddresses: []string{port1, port2},
+		ReadTimeout:     30 * time.Second,
+		MaxConnections:  512,
+		Context:         nil,
+		Storage:         nil,
+		LocalStorage:    &dbAdapter{db},
+		TSDBDir:         dbDir,
+		QueryEngine:     nil,
+		ScrapeManager:   &scrape.Manager{},
+		RuleManager:     &rules.Manager{},
+		Notifier:        nil,
+		RoutePrefix:     "/",
+		EnableAdminAPI:  true,
+		ExternalURL: &url.URL{
+			Scheme: "http",
+			Host:   "localhost" + port1,
+			Path:   "/",
+		},
+		Version:  &PrometheusVersion{},
+		Gatherer: prometheus.DefaultGatherer,
+	}
+
+	opts.Flags = map[string]string{}
+
+	webHandler := New(nil, opts)
+
+	webHandler.config = &config.Config{}
+	webHandler.notifier = &notifier.Manager{}
+	l, err := webHandler.Listeners()
+	if err != nil {
+		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go func() {
+		err := webHandler.Run(ctx, l, "")
+		if err != nil {
+			panic(fmt.Sprintf("Can't start web handler: %s", err))
+		}
+	}()
+
+	// Give some time for the web goroutine to run since we need the server
+	// to be up before starting tests.
+	time.Sleep(5 * time.Second)
+
+	// Set to ready.
+	webHandler.SetReady(true)
+
+	for _, port := range []string{port1, port2} {
+		baseURL := "http://localhost" + port
+
+		resp, err := http.Get(baseURL + "/-/healthy")
+		require.NoError(t, err)
+		require.Equal(t, http.StatusOK, resp.StatusCode)
+		cleanupTestResponse(t, resp)
+
+		resp, err = http.Get(baseURL + "/-/ready")
+		require.NoError(t, err)
+		require.Equal(t, http.StatusOK, resp.StatusCode)
+		cleanupTestResponse(t, resp)
+	}
+}
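One observation on the new test: it sleeps a fixed five seconds before probing, which is slow and can still flake on a loaded machine. A sketch of a polling alternative, using only packages web_test.go already imports; waitForServer is a hypothetical helper, not part of this diff:

// Hypothetical test helper, not part of this diff: poll /-/healthy until the
// server answers instead of sleeping for a fixed five seconds. It returns as
// soon as the endpoint is reachable and fails the test if it never comes up.
func waitForServer(t *testing.T, baseURL string) {
	t.Helper()
	deadline := time.Now().Add(5 * time.Second)
	for time.Now().Before(deadline) {
		resp, err := http.Get(baseURL + "/-/healthy")
		if err == nil {
			resp.Body.Close()
			return
		}
		time.Sleep(50 * time.Millisecond)
	}
	t.Fatalf("server at %s did not become reachable before the deadline", baseURL)
}

The test body would then call waitForServer(t, "http://localhost"+port1) (and likewise for port2) in place of the fixed time.Sleep.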