Merge branch 'main' into nhcb-scrape-impl

# Conflicts:
#	scrape/scrape.go
György Krajcsovits 2024-10-07 11:23:44 +02:00
commit ed2e7dc258
110 changed files with 3431 additions and 1632 deletions

View file

@@ -1,4 +1,4 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Prometheus Community Support
     url: https://prometheus.io/community/

View file

@@ -12,8 +12,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

View file

@@ -12,8 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

View file

@@ -13,7 +13,7 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -29,7 +29,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
@@ -48,7 +48,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""
@@ -62,7 +62,7 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -79,7 +79,7 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           go-version: 1.23.x
@@ -96,7 +96,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -121,7 +121,7 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
@@ -146,7 +146,7 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
@@ -169,7 +169,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
@@ -182,7 +182,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
@@ -208,7 +208,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_main
         with:
@@ -225,7 +225,7 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_release
         with:
@@ -240,10 +240,10 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - name: Install nodejs
-        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
+        uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"

View file

@@ -24,15 +24,15 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
         with:
           languages: ${{ matrix.language }}
       - name: Autobuild
-        uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10

View file

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Set docker hub repo name
        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub
@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Set quay.io org name
        run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name

View file

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

View file

@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
         with:
           persist-credentials: false
@@ -45,6 +45,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6
+        uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10
         with:
           sarif_file: results.sarif

View file

@@ -153,14 +153,4 @@ linters-settings:
     disable:
       - float-compare
       - go-require
-    enable:
-      - bool-compare
-      - compares
-      - empty
-      - error-is-as
-      - error-nil
-      - expected-actual
-      - len
-      - require-error
-      - suite-dont-use-pkg
-      - suite-extra-assert-call
+    enable-all: true

View file

@@ -3,6 +3,7 @@
 ## unreleased

 * [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
+* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894
 * [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910

 ## 3.0.0-beta.0 / 2024-09-05

View file

@@ -2,7 +2,6 @@
 General maintainers:

 * Bryan Boreham (bjboreham@gmail.com / @bboreham)
-* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
 * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
 * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
@@ -17,7 +16,7 @@ Maintainers for specific parts of the codebase:
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( <npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank ( <alex.greenbank@grafana.com> / @alexgreenbank)
-  * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
+  * `otlptranslator`: Arthur Silva Sens (<arthursens2005@gmail.com> / @ArthurSens), Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)

View file

@@ -22,7 +22,6 @@ import (
   "math/bits"
   "net"
   "net/http"
-  _ "net/http/pprof" // Comment this line to disable pprof endpoint.
   "net/url"
   "os"
   "os/signal"
@@ -77,6 +76,7 @@ import (
   "github.com/prometheus/prometheus/tsdb/wlog"
   "github.com/prometheus/prometheus/util/documentcli"
   "github.com/prometheus/prometheus/util/logging"
+  "github.com/prometheus/prometheus/util/notifications"
   prom_runtime "github.com/prometheus/prometheus/util/runtime"
   "github.com/prometheus/prometheus/web"
 )
@@ -153,6 +153,7 @@ type flagConfig struct {
   queryMaxSamples int
   RemoteFlushDeadline model.Duration
   nameEscapingScheme string
+  maxNotificationsSubscribers int
   enableAutoReload bool
   autoReloadInterval model.Duration
@@ -181,9 +182,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
   opts := strings.Split(f, ",")
   for _, o := range opts {
     switch o {
-    case "otlp-write-receiver":
-      c.web.EnableOTLPWriteReceiver = true
-      level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
     case "expand-external-labels":
       c.enableExpandExternalLabels = true
       level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
@@ -217,9 +215,6 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
     case "concurrent-rule-eval":
       c.enableConcurrentRuleEval = true
      level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
-    case "no-default-scrape-port":
-      c.scrape.NoDefaultPort = true
-      level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
     case "promql-experimental-functions":
       parser.EnableExperimentalFunctions = true
       level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
@@ -318,6 +313,9 @@ func main() {
   a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners.").
     Default("512").IntVar(&cfg.web.MaxConnections)
+  a.Flag("web.max-notifications-subscribers", "Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close.").
+    Default("16").IntVar(&cfg.maxNotificationsSubscribers)
   a.Flag("web.external-url",
     "The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.").
     PlaceHolder("<URL>").StringVar(&cfg.prometheusURL)
@@ -344,6 +342,9 @@ func main() {
   a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
     Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
+  a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests.").
+    Default("false").BoolVar(&cfg.web.EnableOTLPWriteReceiver)
   a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
     Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
@@ -383,6 +384,9 @@ func main() {
   serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
     Default("false").BoolVar(&cfg.tsdb.NoLockfile)
+  serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
+    Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
   serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
     Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
@@ -474,7 +478,7 @@ func main() {
   a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
     Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
-  a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+  a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
     Default("").StringsVar(&cfg.featureList)
   a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
@@ -499,6 +503,11 @@ func main() {
   logger := promlog.New(&cfg.promlogConfig)

+  notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer)
+  cfg.web.NotificationsSub = notifs.Sub
+  cfg.web.NotificationsGetter = notifs.Get
+  notifs.AddNotification(notifications.StartingUp)
+
   if err := cfg.setFeatureListOptions(logger); err != nil {
     fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err))
     os.Exit(1)
@@ -984,6 +993,7 @@ func main() {
       func(err error) {
         close(cancel)
         webHandler.SetReady(web.Stopping)
+        notifs.AddNotification(notifications.ShuttingDown)
       },
     )
   }
@@ -1082,6 +1092,14 @@ func main() {
     }
   }

+  callback := func(success bool) {
+    if success {
+      notifs.DeleteNotification(notifications.ConfigurationUnsuccessful)
+      return
+    }
+    notifs.AddNotification(notifications.ConfigurationUnsuccessful)
+  }
+
   g.Add(
     func() error {
       <-reloadReady.C
@@ -1089,7 +1107,7 @@ func main() {
       for {
         select {
         case <-hup:
-          if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
+          if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
            level.Error(logger).Log("msg", "Error reloading config", "err", err)
          } else if cfg.enableAutoReload {
            if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
@@ -1099,7 +1117,7 @@ func main() {
          }
        }
        case rc := <-webHandler.Reload():
-          if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
+          if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
            level.Error(logger).Log("msg", "Error reloading config", "err", err)
            rc <- err
          } else {
@@ -1124,7 +1142,7 @@ func main() {
          }
          level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.")
-          if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
+          if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
            level.Error(logger).Log("msg", "Error reloading config", "err", err)
          } else {
            checksum = currentChecksum
@@ -1154,13 +1172,14 @@ func main() {
          return nil
        }
-        if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
+        if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
          return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err)
        }
        reloadReady.Close()
        webHandler.SetReady(web.Ready)
+        notifs.DeleteNotification(notifications.StartingUp)
        level.Info(logger).Log("msg", "Server is ready to receive web requests.")
        <-cancel
        return nil
@@ -1380,7 +1399,7 @@ type reloader struct {
   reloader func(*config.Config) error
 }
-func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
   start := time.Now()
   timings := []interface{}{}
   level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
@@ -1389,8 +1408,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
     if err == nil {
       configSuccess.Set(1)
       configSuccessTime.SetToCurrentTime()
+      callback(true)
     } else {
       configSuccess.Set(0)
+      callback(false)
     }
   }()
@@ -1597,6 +1618,10 @@ func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels
   return 0, tsdb.ErrNotReady
 }
+func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+  return 0, tsdb.ErrNotReady
+}
+
 func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
   return 0, tsdb.ErrNotReady
 }
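
Note: the new `--web.max-notifications-subscribers` flag and the `notifications.NewNotifications(...)` wiring above imply a pub/sub hub with a cap on concurrent subscribers. Below is a minimal sketch of that pattern; the names (`Hub`, `Sub`, `Add`) are invented for illustration and are not the real `util/notifications` API.

```go
package main

import (
	"fmt"
	"sync"
)

// Notification is a single live-notification entry.
type Notification struct{ Text string }

// Hub fans notifications out to at most maxSubs live subscribers.
type Hub struct {
	mu      sync.Mutex
	maxSubs int
	subs    map[chan Notification]struct{}
}

func NewHub(maxSubs int) *Hub {
	return &Hub{maxSubs: maxSubs, subs: make(map[chan Notification]struct{})}
}

// Sub registers a subscriber; it is refused once the limit is reached,
// mirroring the behaviour described for --web.max-notifications-subscribers.
func (h *Hub) Sub() (ch chan Notification, unsub func(), ok bool) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if len(h.subs) >= h.maxSubs {
		return nil, nil, false
	}
	ch = make(chan Notification, 8)
	h.subs[ch] = struct{}{}
	return ch, func() {
		h.mu.Lock()
		defer h.mu.Unlock()
		delete(h.subs, ch)
	}, true
}

// Add broadcasts a notification without blocking on slow subscribers.
func (h *Hub) Add(n Notification) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for ch := range h.subs {
		select {
		case ch <- n:
		default: // drop rather than stall the sender
		}
	}
}

func main() {
	h := NewHub(2)
	ch, unsub, ok := h.Sub()
	if !ok {
		panic("subscriber limit reached")
	}
	defer unsub()
	h.Add(Notification{Text: "Prometheus is starting up"})
	fmt.Println((<-ch).Text)
}
```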

View file

@@ -125,12 +125,61 @@ func (p *queryLogTest) query(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, 200, r.StatusCode)
   case ruleOrigin:
-    time.Sleep(2 * time.Second)
+    // Poll the /api/v1/rules endpoint until a new rule evaluation is detected.
+    var lastEvalTime time.Time
+    for {
+      r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port))
+      require.NoError(t, err)
+
+      rulesBody, err := io.ReadAll(r.Body)
+      require.NoError(t, err)
+      defer r.Body.Close()
+
+      // Parse the rules response to find the last evaluation time.
+      newEvalTime := parseLastEvaluation(rulesBody)
+      if newEvalTime.After(lastEvalTime) {
+        if !lastEvalTime.IsZero() {
+          break
+        }
+        lastEvalTime = newEvalTime
+      }
+
+      time.Sleep(100 * time.Millisecond)
+    }
   default:
     panic("can't query this origin")
   }
 }

+// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response.
+func parseLastEvaluation(rulesBody []byte) time.Time {
+  var ruleResponse struct {
+    Status string `json:"status"`
+    Data struct {
+      Groups []struct {
+        Rules []struct {
+          LastEvaluation string `json:"lastEvaluation"`
+        } `json:"rules"`
+      } `json:"groups"`
+    } `json:"data"`
+  }
+
+  err := json.Unmarshal(rulesBody, &ruleResponse)
+  if err != nil {
+    return time.Time{}
+  }
+  for _, group := range ruleResponse.Data.Groups {
+    for _, rule := range group.Rules {
+      if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil {
+        return evalTime
+      }
+    }
+  }
+  return time.Time{}
+}

 // queryString returns the expected queryString of a this test.
 func (p *queryLogTest) queryString() string {
   switch p.origin {
@@ -322,7 +371,7 @@ func (p *queryLogTest) run(t *testing.T) {
   if p.exactQueryCount() {
     require.Len(t, ql, qc)
   } else {
-    require.Greater(t, len(ql), qc, "no queries logged")
+    require.GreaterOrEqual(t, len(ql), qc, "no queries logged")
   }
   p.validateLastQuery(t, ql)
   qc = len(ql)
@@ -353,7 +402,7 @@ func (p *queryLogTest) run(t *testing.T) {
   if p.exactQueryCount() {
     require.Len(t, ql, qc)
   } else {
-    require.Greater(t, len(ql), qc, "no queries logged")
+    require.GreaterOrEqual(t, len(ql), qc, "no queries logged")
   }
   p.validateLastQuery(t, ql)
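
Note: the polling loop added above has no deadline of its own. Since testify is already used throughout these tests, the same wait could also be expressed with `require.Eventually`, which adds a timeout. A hedged sketch (`waitForRuleEvaluation` is an invented helper name, and `parseLastEvaluation` refers to the function introduced in the diff above, not included here):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// waitForRuleEvaluation blocks until a rule evaluation newer than the first
// one observed shows up, or the deadline passes.
func waitForRuleEvaluation(t *testing.T, host string, port int) {
	var lastEvalTime time.Time
	require.Eventually(t, func() bool {
		r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", host, port))
		if err != nil {
			return false
		}
		defer r.Body.Close()
		body, err := io.ReadAll(r.Body)
		if err != nil {
			return false
		}
		newEvalTime := parseLastEvaluation(body) // helper from the diff above
		if newEvalTime.After(lastEvalTime) {
			if !lastEvalTime.IsZero() {
				return true // a fresh evaluation after the baseline
			}
			lastEvalTime = newEvalTime
		}
		return false
	}, 30*time.Second, 100*time.Millisecond, "no new rule evaluation observed")
}
```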

View file

@@ -291,7 +291,7 @@ func main() {
   promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
   promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()

-  featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+  featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings()

   documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
@@ -321,24 +321,21 @@ func main() {
     }
   }

-  var noDefaultScrapePort bool
   for _, f := range *featureList {
     opts := strings.Split(f, ",")
     for _, o := range opts {
       switch o {
-      case "no-default-scrape-port":
-        noDefaultScrapePort = true
       case "":
         continue
       default:
-        fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o)
+        fmt.Printf(" WARNING: --enable-feature is currently a no-op")
       }
     }
   }

   switch parsedCmd {
   case sdCheckCmd.FullCommand():
-    os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer))
+    os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
   case checkConfigCmd.FullCommand():
     os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
@@ -1219,7 +1216,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
   lb := labels.NewBuilder(labels.EmptyLabels())
   for _, tg := range targetGroups {
     var failures []error
-    targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb)
+    targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb)
     if len(failures) > 0 {
       first := failures[0]
       return first

View file

@@ -146,7 +146,7 @@ func TestCheckSDFile(t *testing.T) {
     t.Run(test.name, func(t *testing.T) {
       _, err := checkSDFile(test.file)
       if test.err != "" {
-        require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+        require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
         return
       }
       require.NoError(t, err)
@@ -228,7 +228,7 @@ func TestCheckTargetConfig(t *testing.T) {
     t.Run(test.name, func(t *testing.T) {
       _, err := checkConfig(false, "testdata/"+test.file, false)
       if test.err != "" {
-        require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+        require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
         return
       }
       require.NoError(t, err)
@@ -315,7 +315,7 @@ func TestCheckConfigSyntax(t *testing.T) {
       expectedErrMsg = test.errWindows
     }
     if expectedErrMsg != "" {
-      require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+      require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error())
       return
     }
     require.NoError(t, err)
@@ -345,7 +345,7 @@ func TestAuthorizationConfig(t *testing.T) {
     t.Run(test.name, func(t *testing.T) {
       _, err := checkConfig(false, "testdata/"+test.file, false)
       if test.err != "" {
-        require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error())
+        require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
        return
       }
       require.NoError(t, err)
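
Note: the assertion rewrites in this file (and in config_test.go, consul_test.go, and the hetzner test below) all follow one testifylint idiom: compare errors with the dedicated error assertions instead of `err.Error()`, which panics when `err` is nil. A self-contained sketch of the before/after, with an invented `explainFailure` helper:

```go
package main

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// explainFailure is a stand-in for any function under test that returns an error.
func explainFailure(ok bool) error {
	if ok {
		return nil
	}
	return errors.New("check failed: bad target")
}

func TestErrorAssertions(t *testing.T) {
	err := explainFailure(false)

	// Before: comparing strings; would panic on a nil err.
	require.Equal(t, "check failed: bad target", err.Error())

	// After: fails cleanly when err is nil, and states the intent directly.
	require.EqualError(t, err, "check failed: bad target")
	require.ErrorContains(t, err, "bad target")
}
```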

View file

@@ -38,7 +38,7 @@ type sdCheckResult struct {
 }

 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int {
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
   logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))

   cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
@@ -114,7 +114,7 @@ outerLoop:
   }
   results := []sdCheckResult{}
   for _, tgs := range sdCheckResults {
-    results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
+    results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
   }

   res, err := json.MarshalIndent(results, "", " ")
@@ -127,7 +127,7 @@ outerLoop:
   return successExitCode
 }

-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
   sdCheckResults := []sdCheckResult{}
   lb := labels.NewBuilder(labels.EmptyLabels())
   for _, targetGroup := range targetGroups {
@@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
     }
   }

-  res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
+  res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
   result := sdCheckResult{
     DiscoveredLabels: orig,
     Labels: res,

View file

@@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) {
     },
   }

-  testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
+  testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
 }

View file

@@ -2095,8 +2095,7 @@ func TestBadConfigs(t *testing.T) {
   }()
   for _, ee := range expectedErrors {
     _, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
-    require.Error(t, err, "%s", ee.filename)
-    require.Contains(t, err.Error(), ee.errMsg,
+    require.ErrorContains(t, err, ee.errMsg,
       "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
   }
 }

View file

@@ -407,7 +407,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
     err = d.getDatacenter()

     // An error should be returned.
-    require.Equal(t, tc.errMessage, err.Error())
+    require.EqualError(t, err, tc.errMessage)
     // Should still be empty.
     require.Equal(t, "", d.clientDatacenter)
   }

View file

@@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) {

 // UnmarshalYAML implements yaml.Unmarshaler.
 func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
-  cfgTyp := getConfigType(configsType)
+  cfgTyp := reflect.StructOf(configFields)
   cfgPtr := reflect.New(cfgTyp)
   cfgVal := cfgPtr.Elem()
@@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {

 // MarshalYAML implements yaml.Marshaler.
 func (c Configs) MarshalYAML() (interface{}, error) {
-  cfgTyp := getConfigType(configsType)
+  cfgTyp := reflect.StructOf(configFields)
   cfgPtr := reflect.New(cfgTyp)
   cfgVal := cfgPtr.Elem()
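
Note: `reflect.StructOf(configFields)` builds the scratch struct used for YAML (un)marshaling at runtime from a list of registered fields. A self-contained sketch of the technique, with an invented one-field list (the real `configFields` is assembled by the discovery registry):

```go
package main

import (
	"fmt"
	"reflect"

	"gopkg.in/yaml.v2"
)

func main() {
	// Each discovery mechanism would contribute one slice-typed field
	// carrying a YAML tag; here a single illustrative field.
	fields := []reflect.StructField{
		{
			Name: "StaticConfigs",
			Type: reflect.TypeOf([]map[string]string{}),
			Tag:  `yaml:"static_configs,omitempty"`,
		},
	}

	cfgTyp := reflect.StructOf(fields) // anonymous struct type built at runtime
	cfgPtr := reflect.New(cfgTyp)      // pointer to a zero value of that type

	input := "static_configs:\n- targets: foo:1234\n"
	if err := yaml.Unmarshal([]byte(input), cfgPtr.Interface()); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfgPtr.Elem().Interface())
}
```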

View file

@@ -0,0 +1,36 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+  "testing"
+
+  "github.com/stretchr/testify/require"
+  "gopkg.in/yaml.v2"
+)
+
+func TestConfigsCustomUnMarshalMarshal(t *testing.T) {
+  input := `static_configs:
+- targets:
+  - foo:1234
+  - bar:4321
+`
+  cfg := &Configs{}
+  err := yaml.UnmarshalStrict([]byte(input), cfg)
+  require.NoError(t, err)
+
+  output, err := yaml.Marshal(cfg)
+  require.NoError(t, err)
+  require.Equal(t, input, string(output))
+}

View file

@@ -95,8 +95,7 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
   require.NoError(t, err)

   targetGroups, err := d.refresh(context.Background())
-  require.Error(t, err)
-  require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
+  require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot")

   require.Empty(t, targetGroups)
 }

View file

@@ -361,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
       target = target.Merge(podLabels(pod))

       // Attach potential container port labels matching the endpoint port.
-      for _, c := range pod.Spec.Containers {
+      containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
+      for i, c := range containers {
         for _, cport := range c.Ports {
           if port.Port == cport.ContainerPort {
             ports := strconv.FormatUint(uint64(port.Port), 10)
+            isInit := i >= len(pod.Spec.Containers)
             target[podContainerNameLabel] = lv(c.Name)
             target[podContainerImageLabel] = lv(c.Image)
             target[podContainerPortNameLabel] = lv(cport.Name)
             target[podContainerPortNumberLabel] = lv(ports)
             target[podContainerPortProtocolLabel] = lv(string(port.Protocol))
+            target[podContainerIsInit] = lv(strconv.FormatBool(isInit))
             break
           }
         }
@@ -411,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
       continue
     }

-    for _, c := range pe.pod.Spec.Containers {
+    containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...)
+    for i, c := range containers {
       for _, cport := range c.Ports {
         hasSeenPort := func() bool {
           for _, eport := range pe.servicePorts {
@@ -428,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
         a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
         ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

+        isInit := i >= len(pe.pod.Spec.Containers)
         target := model.LabelSet{
           model.AddressLabel: lv(a),
           podContainerNameLabel: lv(c.Name),
@@ -435,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
           podContainerPortNameLabel: lv(cport.Name),
           podContainerPortNumberLabel: lv(ports),
           podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+          podContainerIsInit: lv(strconv.FormatBool(isInit)),
         }
         tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
       }
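
Note: the init-container support above hinges on a single index test: after appending `InitContainers` to `Containers`, any index at or past `len(pod.Spec.Containers)` must have come from the init list, and that is what feeds the new `__meta_kubernetes_pod_container_init` label. A stripped-down sketch of the classification (types invented; the sketch copies the slice to avoid mutating the original backing array):

```go
package main

import "fmt"

type Container struct{ Name string }

func main() {
	regular := []Container{{Name: "c1"}}
	inits := []Container{{Name: "ic1"}, {Name: "ic2"}}

	containers := append(append([]Container{}, regular...), inits...)
	for i, c := range containers {
		isInit := i >= len(regular) // the index alone decides the label value
		fmt.Printf("%s init=%v\n", c.Name, isInit)
	}
}
```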

View file

@@ -244,6 +244,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
           "__meta_kubernetes_pod_container_port_number": "9000",
           "__meta_kubernetes_pod_container_port_protocol": "TCP",
           "__meta_kubernetes_pod_uid": "deadbeef",
+          "__meta_kubernetes_pod_container_init": "false",
         },
         {
           "__address__": "1.2.3.4:9001",
@@ -259,6 +260,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
           "__meta_kubernetes_pod_container_port_number": "9001",
           "__meta_kubernetes_pod_container_port_protocol": "TCP",
           "__meta_kubernetes_pod_uid": "deadbeef",
+          "__meta_kubernetes_pod_container_init": "false",
         },
       },
       Labels: model.LabelSet{
@@ -821,6 +823,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
           "__meta_kubernetes_pod_container_port_number": "9000",
           "__meta_kubernetes_pod_container_port_protocol": "TCP",
           "__meta_kubernetes_pod_uid": "deadbeef",
+          "__meta_kubernetes_pod_container_init": "false",
         },
       },
       Labels: model.LabelSet{
@@ -1078,6 +1081,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
           "__meta_kubernetes_pod_container_port_number": "9000",
           "__meta_kubernetes_pod_container_port_protocol": "TCP",
           "__meta_kubernetes_pod_uid": "deadbeef",
+          "__meta_kubernetes_pod_container_init": "false",
         },
       },
       Labels: model.LabelSet{
@@ -1089,3 +1093,167 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
     },
   }.Run(t)
 }
+
+func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
+  objs := []runtime.Object{
+    &v1.Endpoints{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: "testsidecar",
+        Namespace: "default",
+      },
+      Subsets: []v1.EndpointSubset{
+        {
+          Addresses: []v1.EndpointAddress{
+            {
+              IP: "4.3.2.1",
+              TargetRef: &v1.ObjectReference{
+                Kind: "Pod",
+                Name: "testpod",
+                Namespace: "default",
+              },
+            },
+          },
+          Ports: []v1.EndpointPort{
+            {
+              Name: "testport",
+              Port: 9000,
+              Protocol: v1.ProtocolTCP,
+            },
+            {
+              Name: "initport",
+              Port: 9111,
+              Protocol: v1.ProtocolTCP,
+            },
+          },
+        },
+      },
+    },
+    &v1.Pod{
+      ObjectMeta: metav1.ObjectMeta{
+        Name: "testpod",
+        Namespace: "default",
+        UID: types.UID("deadbeef"),
+      },
+      Spec: v1.PodSpec{
+        NodeName: "testnode",
+        InitContainers: []v1.Container{
+          {
+            Name: "ic1",
+            Image: "ic1:latest",
+            Ports: []v1.ContainerPort{
+              {
+                Name: "initport",
+                ContainerPort: 1111,
+                Protocol: v1.ProtocolTCP,
+              },
+            },
+          },
+          {
+            Name: "ic2",
+            Image: "ic2:latest",
+            Ports: []v1.ContainerPort{
+              {
+                Name: "initport",
+                ContainerPort: 9111,
+                Protocol: v1.ProtocolTCP,
+              },
+            },
+          },
+        },
+        Containers: []v1.Container{
+          {
+            Name: "c1",
+            Image: "c1:latest",
+            Ports: []v1.ContainerPort{
+              {
+                Name: "mainport",
+                ContainerPort: 9000,
+                Protocol: v1.ProtocolTCP,
+              },
+            },
+          },
+        },
+      },
+      Status: v1.PodStatus{
+        HostIP: "2.3.4.5",
+        PodIP: "4.3.2.1",
+      },
+    },
+  }
+
+  n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...)
+
+  k8sDiscoveryTest{
+    discovery: n,
+    expectedMaxItems: 1,
+    expectedRes: map[string]*targetgroup.Group{
+      "endpoints/default/testsidecar": {
+        Targets: []model.LabelSet{
+          {
+            "__address__": "4.3.2.1:9000",
+            "__meta_kubernetes_endpoint_address_target_kind": "Pod",
+            "__meta_kubernetes_endpoint_address_target_name": "testpod",
+            "__meta_kubernetes_endpoint_port_name": "testport",
+            "__meta_kubernetes_endpoint_port_protocol": "TCP",
+            "__meta_kubernetes_endpoint_ready": "true",
+            "__meta_kubernetes_pod_container_image": "c1:latest",
+            "__meta_kubernetes_pod_container_name": "c1",
+            "__meta_kubernetes_pod_container_port_name": "mainport",
+            "__meta_kubernetes_pod_container_port_number": "9000",
+            "__meta_kubernetes_pod_container_port_protocol": "TCP",
+            "__meta_kubernetes_pod_host_ip": "2.3.4.5",
+            "__meta_kubernetes_pod_ip": "4.3.2.1",
+            "__meta_kubernetes_pod_name": "testpod",
+            "__meta_kubernetes_pod_node_name": "testnode",
+            "__meta_kubernetes_pod_phase": "",
+            "__meta_kubernetes_pod_ready": "unknown",
+            "__meta_kubernetes_pod_uid": "deadbeef",
+            "__meta_kubernetes_pod_container_init": "false",
+          },
+          {
+            "__address__": "4.3.2.1:9111",
+            "__meta_kubernetes_endpoint_address_target_kind": "Pod",
+            "__meta_kubernetes_endpoint_address_target_name": "testpod",
+            "__meta_kubernetes_endpoint_port_name": "initport",
+            "__meta_kubernetes_endpoint_port_protocol": "TCP",
+            "__meta_kubernetes_endpoint_ready": "true",
+            "__meta_kubernetes_pod_container_image": "ic2:latest",
+            "__meta_kubernetes_pod_container_name": "ic2",
+            "__meta_kubernetes_pod_container_port_name": "initport",
+            "__meta_kubernetes_pod_container_port_number": "9111",
+            "__meta_kubernetes_pod_container_port_protocol": "TCP",
+            "__meta_kubernetes_pod_host_ip": "2.3.4.5",
+            "__meta_kubernetes_pod_ip": "4.3.2.1",
+            "__meta_kubernetes_pod_name": "testpod",
+            "__meta_kubernetes_pod_node_name": "testnode",
+            "__meta_kubernetes_pod_phase": "",
+            "__meta_kubernetes_pod_ready": "unknown",
+            "__meta_kubernetes_pod_uid": "deadbeef",
+            "__meta_kubernetes_pod_container_init": "true",
+          },
+          {
+            "__address__": "4.3.2.1:1111",
+            "__meta_kubernetes_pod_container_image": "ic1:latest",
+            "__meta_kubernetes_pod_container_name": "ic1",
+            "__meta_kubernetes_pod_container_port_name": "initport",
+            "__meta_kubernetes_pod_container_port_number": "1111",
+            "__meta_kubernetes_pod_container_port_protocol": "TCP",
+            "__meta_kubernetes_pod_host_ip": "2.3.4.5",
+            "__meta_kubernetes_pod_ip": "4.3.2.1",
+            "__meta_kubernetes_pod_name": "testpod",
+            "__meta_kubernetes_pod_node_name": "testnode",
+            "__meta_kubernetes_pod_phase": "",
+            "__meta_kubernetes_pod_ready": "unknown",
+            "__meta_kubernetes_pod_uid": "deadbeef",
+            "__meta_kubernetes_pod_container_init": "true",
+          },
+        },
+        Labels: model.LabelSet{
+          "__meta_kubernetes_endpoints_name": "testsidecar",
+          "__meta_kubernetes_namespace": "default",
+        },
+        Source: "endpoints/default/testsidecar",
+      },
+    },
+  }.Run(t)
+}

View file

@@ -377,19 +377,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
       target = target.Merge(podLabels(pod))

       // Attach potential container port labels matching the endpoint port.
-      for _, c := range pod.Spec.Containers {
+      containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
+
+      for i, c := range containers {
         for _, cport := range c.Ports {
           if port.port() == nil {
             continue
           }
           if *port.port() == cport.ContainerPort {
             ports := strconv.FormatUint(uint64(*port.port()), 10)
+            isInit := i >= len(pod.Spec.Containers)
             target[podContainerNameLabel] = lv(c.Name)
             target[podContainerImageLabel] = lv(c.Image)
             target[podContainerPortNameLabel] = lv(cport.Name)
             target[podContainerPortNumberLabel] = lv(ports)
             target[podContainerPortProtocolLabel] = lv(string(cport.Protocol))
+            target[podContainerIsInit] = lv(strconv.FormatBool(isInit))
             break
           }
         }
@@ -417,7 +421,8 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
       continue
     }

-    for _, c := range pe.pod.Spec.Containers {
+    containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...)
+    for i, c := range containers {
       for _, cport := range c.Ports {
         hasSeenPort := func() bool {
           for _, eport := range pe.servicePorts {
@@ -437,6 +442,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
         a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
         ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

+        isInit := i >= len(pe.pod.Spec.Containers)
         target := model.LabelSet{
           model.AddressLabel: lv(a),
           podContainerNameLabel: lv(c.Name),
@@ -444,6 +450,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
           podContainerPortNameLabel: lv(cport.Name),
           podContainerPortNumberLabel: lv(ports),
           podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+          podContainerIsInit: lv(strconv.FormatBool(isInit)),
         }
         tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
       }

View file

@@ -291,6 +291,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
                    "__meta_kubernetes_pod_phase":   "",
                    "__meta_kubernetes_pod_ready":   "unknown",
                    "__meta_kubernetes_pod_uid":     "deadbeef",
+                   "__meta_kubernetes_pod_container_init": "false",
                },
                {
                    "__address__": "1.2.3.4:9001",
@@ -306,6 +307,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
                    "__meta_kubernetes_pod_phase":   "",
                    "__meta_kubernetes_pod_ready":   "unknown",
                    "__meta_kubernetes_pod_uid":     "deadbeef",
+                   "__meta_kubernetes_pod_container_init": "false",
                },
            },
            Labels: model.LabelSet{
@@ -986,6 +988,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
                    "__meta_kubernetes_pod_phase":   "",
                    "__meta_kubernetes_pod_ready":   "unknown",
                    "__meta_kubernetes_pod_uid":     "deadbeef",
+                   "__meta_kubernetes_pod_container_init": "false",
                },
            },
            Labels: model.LabelSet{
@@ -1199,3 +1202,165 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) {
        })
    }
}
+
+func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) {
+   objs := []runtime.Object{
+       &v1.EndpointSlice{
+           ObjectMeta: metav1.ObjectMeta{
+               Name:      "testsidecar",
+               Namespace: "default",
+           },
+           AddressType: v1.AddressTypeIPv4,
+           Ports: []v1.EndpointPort{
+               {
+                   Name:     strptr("testport"),
+                   Port:     int32ptr(9000),
+                   Protocol: protocolptr(corev1.ProtocolTCP),
+               },
+               {
+                   Name:     strptr("initport"),
+                   Port:     int32ptr(9111),
+                   Protocol: protocolptr(corev1.ProtocolTCP),
+               },
+           },
+           Endpoints: []v1.Endpoint{
+               {
+                   Addresses: []string{"4.3.2.1"},
+                   TargetRef: &corev1.ObjectReference{
+                       Kind:      "Pod",
+                       Name:      "testpod",
+                       Namespace: "default",
+                   },
+               },
+           },
+       },
+       &corev1.Pod{
+           ObjectMeta: metav1.ObjectMeta{
+               Name:      "testpod",
+               Namespace: "default",
+               UID:       types.UID("deadbeef"),
+           },
+           Spec: corev1.PodSpec{
+               NodeName: "testnode",
+               InitContainers: []corev1.Container{
+                   {
+                       Name:  "ic1",
+                       Image: "ic1:latest",
+                       Ports: []corev1.ContainerPort{
+                           {
+                               Name:          "initport",
+                               ContainerPort: 1111,
+                               Protocol:      corev1.ProtocolTCP,
+                           },
+                       },
+                   },
+                   {
+                       Name:  "ic2",
+                       Image: "ic2:latest",
+                       Ports: []corev1.ContainerPort{
+                           {
+                               Name:          "initport",
+                               ContainerPort: 9111,
+                               Protocol:      corev1.ProtocolTCP,
+                           },
+                       },
+                   },
+               },
+               Containers: []corev1.Container{
+                   {
+                       Name:  "c1",
+                       Image: "c1:latest",
+                       Ports: []corev1.ContainerPort{
+                           {
+                               Name:          "mainport",
+                               ContainerPort: 9000,
+                               Protocol:      corev1.ProtocolTCP,
+                           },
+                       },
+                   },
+               },
+           },
+           Status: corev1.PodStatus{
+               HostIP: "2.3.4.5",
+               PodIP:  "4.3.2.1",
+           },
+       },
+   }
+
+   n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...)
+
+   k8sDiscoveryTest{
+       discovery:        n,
+       expectedMaxItems: 1,
+       expectedRes: map[string]*targetgroup.Group{
+           "endpointslice/default/testsidecar": {
+               Targets: []model.LabelSet{
+                   {
+                       "__address__": "4.3.2.1:9000",
+                       "__meta_kubernetes_endpointslice_address_target_kind": "Pod",
+                       "__meta_kubernetes_endpointslice_address_target_name": "testpod",
+                       "__meta_kubernetes_endpointslice_port":                "9000",
+                       "__meta_kubernetes_endpointslice_port_name":           "testport",
+                       "__meta_kubernetes_endpointslice_port_protocol":       "TCP",
+                       "__meta_kubernetes_pod_container_image":               "c1:latest",
+                       "__meta_kubernetes_pod_container_name":                "c1",
+                       "__meta_kubernetes_pod_container_port_name":           "mainport",
+                       "__meta_kubernetes_pod_container_port_number":         "9000",
+                       "__meta_kubernetes_pod_container_port_protocol":       "TCP",
+                       "__meta_kubernetes_pod_host_ip":                       "2.3.4.5",
+                       "__meta_kubernetes_pod_ip":                            "4.3.2.1",
+                       "__meta_kubernetes_pod_name":                          "testpod",
+                       "__meta_kubernetes_pod_node_name":                     "testnode",
+                       "__meta_kubernetes_pod_phase":                         "",
+                       "__meta_kubernetes_pod_ready":                         "unknown",
+                       "__meta_kubernetes_pod_uid":                           "deadbeef",
+                       "__meta_kubernetes_pod_container_init":                "false",
+                   },
+                   {
+                       "__address__": "4.3.2.1:9111",
+                       "__meta_kubernetes_endpointslice_address_target_kind": "Pod",
+                       "__meta_kubernetes_endpointslice_address_target_name": "testpod",
+                       "__meta_kubernetes_endpointslice_port":                "9111",
+                       "__meta_kubernetes_endpointslice_port_name":           "initport",
+                       "__meta_kubernetes_endpointslice_port_protocol":       "TCP",
+                       "__meta_kubernetes_pod_container_image":               "ic2:latest",
+                       "__meta_kubernetes_pod_container_name":                "ic2",
+                       "__meta_kubernetes_pod_container_port_name":           "initport",
+                       "__meta_kubernetes_pod_container_port_number":         "9111",
+                       "__meta_kubernetes_pod_container_port_protocol":       "TCP",
+                       "__meta_kubernetes_pod_host_ip":                       "2.3.4.5",
+                       "__meta_kubernetes_pod_ip":                            "4.3.2.1",
+                       "__meta_kubernetes_pod_name":                          "testpod",
+                       "__meta_kubernetes_pod_node_name":                     "testnode",
+                       "__meta_kubernetes_pod_phase":                         "",
+                       "__meta_kubernetes_pod_ready":                         "unknown",
+                       "__meta_kubernetes_pod_uid":                           "deadbeef",
+                       "__meta_kubernetes_pod_container_init":                "true",
+                   },
+                   {
+                       "__address__": "4.3.2.1:1111",
+                       "__meta_kubernetes_pod_container_image":         "ic1:latest",
+                       "__meta_kubernetes_pod_container_name":          "ic1",
+                       "__meta_kubernetes_pod_container_port_name":     "initport",
+                       "__meta_kubernetes_pod_container_port_number":   "1111",
+                       "__meta_kubernetes_pod_container_port_protocol": "TCP",
+                       "__meta_kubernetes_pod_host_ip":                 "2.3.4.5",
+                       "__meta_kubernetes_pod_ip":                      "4.3.2.1",
+                       "__meta_kubernetes_pod_name":                    "testpod",
+                       "__meta_kubernetes_pod_node_name":               "testnode",
+                       "__meta_kubernetes_pod_phase":                   "",
+                       "__meta_kubernetes_pod_ready":                   "unknown",
+                       "__meta_kubernetes_pod_uid":                     "deadbeef",
+                       "__meta_kubernetes_pod_container_init":          "true",
+                   },
+               },
+               Labels: model.LabelSet{
+                   "__meta_kubernetes_endpointslice_address_type": "IPv4",
+                   "__meta_kubernetes_endpointslice_name":         "testsidecar",
+                   "__meta_kubernetes_namespace":                  "default",
+               },
+               Source: "endpointslice/default/testsidecar",
+           },
+       },
+   }.Run(t)
+}


@@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    _, err := hypervisor.refresh(ctx)
-   require.Error(t, err)
-   require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
+   require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}
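This hunk and the similar ones below all make the same testify simplification: the two-step `require.Error` plus `require.Contains` (or `require.True(strings.Contains(...))`) check collapses into a single `require.ErrorContains`, with `require.EqualError` used where an exact message is expected. A minimal sketch of the pattern; `doRefresh` is a hypothetical stand-in for the refresh calls in these tests, while the assertions are the real testify API:

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// doRefresh is a hypothetical stand-in that always fails, like a refresh
// against a cancelled context.
func doRefresh() error {
	return errors.New("context canceled")
}

func TestErrorContainsPattern(t *testing.T) {
	err := doRefresh()
	// Old style: two assertions.
	require.Error(t, err)
	require.Contains(t, err.Error(), "canceled")
	// New style: one assertion, which also fails with a clear message
	// when err is nil instead of panicking on err.Error().
	require.ErrorContains(t, err, "canceled")
}
```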


@@ -134,6 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    _, err := hypervisor.refresh(ctx)
-   require.Error(t, err)
-   require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
+   require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}


@@ -21,7 +21,6 @@ import (
    "net/http/httptest"
    "net/url"
    "strconv"
-   "strings"
    "testing"

    "github.com/prometheus/client_golang/prometheus"
@@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
    td, m, _ := newTritonDiscovery(conf)

    _, err := td.refresh(context.Background())
-   require.Error(t, err)
-   require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"))
+   require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint")
    m.Unregister()
}
@@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    _, err := td.refresh(ctx)
-   require.Error(t, err)
-   require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
+   require.ErrorContains(t, err, context.Canceled.Error())
    m.Unregister()
}


@@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) {
    endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("127.0.0.1"), "monitoring")
    require.Empty(t, endpointURL)
-   require.Error(t, err)
-   require.Equal(t, "invalid xDS server URL", err.Error())
+   require.EqualError(t, err, "invalid xDS server URL")
}

func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) {
    endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring")
    require.Empty(t, endpointURL)
-   require.Error(t, err)
-   require.Contains(t, err.Error(), "must be either 'http' or 'https'")
+   require.ErrorContains(t, err, "must be either 'http' or 'https'")
}

func TestMakeXDSResourceHttpEndpoint(t *testing.T) {


@@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) {
    }}
    groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL)
    require.Nil(t, groups)
-   require.Error(t, err)
-   require.Contains(t, err.Error(), "cannot parse")
+   require.ErrorContains(t, err, "cannot parse")
}

func TestNewKumaHTTPDiscovery(t *testing.T) {


@@ -21,6 +21,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.config.file</code> | [EXPERIMENTAL] Path to configuration file that can enable TLS or authentication. |  |
| <code class="text-nowrap">--web.read-timeout</code> | Maximum duration before timing out read of the request, and closing idle connections. | `5m` |
| <code class="text-nowrap">--web.max-connections</code> | Maximum number of simultaneous connections across all listeners. | `512` |
+| <code class="text-nowrap">--web.max-notifications-subscribers</code> | Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close. | `16` |
| <code class="text-nowrap">--web.external-url</code> | The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically. |  |
| <code class="text-nowrap">--web.route-prefix</code> | Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url. |  |
| <code class="text-nowrap">--web.user-assets</code> | Path to static asset directory, available at /user. |  |
@@ -28,6 +29,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.enable-admin-api</code> | Enable API endpoints for admin control actions. | `false` |
| <code class="text-nowrap">--web.enable-remote-write-receiver</code> | Enable API endpoint accepting remote write requests. | `false` |
| <code class="text-nowrap">--web.remote-write-receiver.accepted-protobuf-messages</code> | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` |
+| <code class="text-nowrap">--web.enable-otlp-receiver</code> | Enable API endpoint accepting OTLP write requests. | `false` |
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
@@ -56,7 +58,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. |  |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. |  |
| <code class="text-nowrap">--agent</code> | Run Prometheus in 'Agent mode'. |  |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |


@@ -15,7 +15,7 @@ Tooling for the Prometheus monitoring system.
| <code class="text-nowrap">-h</code>, <code class="text-nowrap">--help</code> | Show context-sensitive help (also try --help-long and --help-man). |
| <code class="text-nowrap">--version</code> | Show application version. |
| <code class="text-nowrap">--experimental</code> | Enable experimental commands. |
-| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details. |
+| <code class="text-nowrap">--enable-feature</code> <code class="text-nowrap">...<code class="text-nowrap"> | Comma separated feature names to enable. Currently unused. |


@@ -71,15 +71,6 @@ When enabled, the GOMEMLIMIT variable is automatically set to match the Linux co
There is also an additional tuning flag, `--auto-gomemlimit.ratio`, which allows controlling how much of the memory is used for Prometheus. The remainder is reserved for memory outside the process. For example, kernel page cache. Page cache is important for Prometheus TSDB query performance. The default is `0.9`, which means 90% of the memory limit will be used for Prometheus.

-## No default scrape port
-
-`--enable-feature=no-default-scrape-port`
-
-When enabled, the default ports for HTTP (`:80`) or HTTPS (`:443`) will _not_ be added to
-the address used to scrape a target (the value of the `__address__` label), contrary to the default behavior.
-In addition, if a default HTTP or HTTPS port has already been added either in a static configuration or
-by a service discovery mechanism and the respective scheme is specified (`http` or `https`), that port will be removed.
-
## Native Histograms

`--enable-feature=native-histograms`
@@ -156,14 +147,6 @@ This should **only** be applied to metrics that currently produce such labels.
    regex: (\d+)\.0+;.*_bucket
```

-## OTLP Receiver
-
-`--enable-feature=otlp-write-receiver`
-
-The OTLP receiver allows Prometheus to accept [OpenTelemetry](https://opentelemetry.io/) metrics writes.
-Prometheus is best used as a Pull based system, and staleness, `up` metric, and other Pull enabled features
-won't work when you push OTLP metrics.
-
## Experimental PromQL functions

`--enable-feature=promql-experimental-functions`


@@ -1388,8 +1388,8 @@ is not considered an efficient way of ingesting samples. Use it
with caution for specific low-volume use cases. It is not suitable for
replacing the ingestion via scraping.

-Enable the OTLP receiver by the feature flag
-`--enable-feature=otlp-write-receiver`. When enabled, the OTLP receiver
+Enable the OTLP receiver by setting
+`--web.enable-otlp-receiver`. When enabled, the OTLP receiver
endpoint is `/api/v1/otlp/v1/metrics`.

*New in v2.47*
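For a rough picture of a client pushing to that endpoint, here is a hedged Go sketch using the OpenTelemetry SDK's OTLP/HTTP metric exporter. The `localhost:9090` address, the export interval, and the metric name are assumptions for a local test, not part of this change:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Point the OTLP/HTTP exporter at the Prometheus OTLP receiver path.
	exp, err := otlpmetrichttp.New(ctx,
		otlpmetrichttp.WithEndpoint("localhost:9090"),         // assumed local Prometheus
		otlpmetrichttp.WithURLPath("/api/v1/otlp/v1/metrics"), // receiver endpoint from the docs
		otlpmetrichttp.WithInsecure(),                         // plain HTTP for local testing
	)
	if err != nil {
		log.Fatal(err)
	}

	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp,
			sdkmetric.WithInterval(15*time.Second))),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	// Record something so there is a sample to export.
	counter, err := provider.Meter("example").Int64Counter("demo_requests_total")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1)
}
```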


@@ -109,8 +109,16 @@ single sample value for each at a given timestamp (point in time). In the simpl
form, only a metric name is specified, which results in an instant vector
containing elements for all time series that have this metric name.

+The value returned will be that of the most recent sample at or before the
+query's evaluation timestamp (in the case of an
+[instant query](api.md#instant-queries))
+or the current step within the query (in the case of a
+[range query](api.md/#range-queries)).
+The [`@` modifier](#modifier) allows overriding the timestamp relative to which
+the selection takes place. Time series are only returned if their most recent sample is less than the [lookback period](#staleness) ago.
+
This example selects all time series that have the `http_requests_total` metric
-name:
+name, returning the most recent sample for each:

    http_requests_total
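To see the evaluation-timestamp behaviour from a client's perspective, a small sketch with the Prometheus Go API client; the server address is an assumption, and the query mirrors the docs example above:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"}) // assumed server
	if err != nil {
		log.Fatal(err)
	}
	promAPI := v1.NewAPI(client)

	// An instant query evaluated at ts: each matching series contributes its
	// most recent sample at or before ts, provided that sample falls within
	// the lookback period.
	ts := time.Now()
	result, warnings, err := promAPI.Query(context.Background(), "http_requests_total", ts)
	if err != nil {
		log.Fatal(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)
}
```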
@@ -359,7 +367,8 @@ cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not precisely align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
series. It does so by taking the newest sample that is less than the lookback period ago.
-The lookback period is 5 minutes by default.
+The lookback period is 5 minutes by default, but can be
+[set with the `--query.lookback-delta` flag](../command-line/prometheus.md)

If a target scrape or rule evaluation no longer returns a sample for a time
series that was previously present, this time series will be marked as stale.


@@ -8,8 +8,8 @@ require (
    github.com/gogo/protobuf v1.3.2
    github.com/golang/snappy v0.0.4
    github.com/influxdata/influxdb v1.11.6
-   github.com/prometheus/client_golang v1.20.2
-   github.com/prometheus/common v0.57.0
+   github.com/prometheus/client_golang v1.20.4
+   github.com/prometheus/common v0.60.0
    github.com/prometheus/prometheus v0.53.1
    github.com/stretchr/testify v1.9.0
)
@@ -55,11 +55,11 @@ require (
    go.opentelemetry.io/otel/trace v1.27.0 // indirect
    go.uber.org/atomic v1.11.0 // indirect
    go.uber.org/multierr v1.11.0 // indirect
-   golang.org/x/crypto v0.25.0 // indirect
-   golang.org/x/net v0.27.0 // indirect
-   golang.org/x/oauth2 v0.21.0 // indirect
-   golang.org/x/sys v0.22.0 // indirect
-   golang.org/x/text v0.16.0 // indirect
+   golang.org/x/crypto v0.27.0 // indirect
+   golang.org/x/net v0.29.0 // indirect
+   golang.org/x/oauth2 v0.23.0 // indirect
+   golang.org/x/sys v0.25.0 // indirect
+   golang.org/x/text v0.18.0 // indirect
    golang.org/x/time v0.5.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
    google.golang.org/grpc v1.65.0 // indirect


@@ -253,8 +253,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
-github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -264,8 +264,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
-github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
+github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
+github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -323,8 +323,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -344,20 +344,20 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
+golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -373,17 +373,17 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

go.mod

@@ -43,7 +43,7 @@ require (
    github.com/json-iterator/go v1.1.12
    github.com/klauspost/compress v1.17.9
    github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-   github.com/linode/linodego v1.40.0
+   github.com/linode/linodego v1.41.0
    github.com/miekg/dns v1.1.62
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
    github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -62,17 +62,17 @@ require (
    github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
    github.com/stretchr/testify v1.9.0
    github.com/vultr/govultr/v2 v2.17.2
-   go.opentelemetry.io/collector/pdata v1.14.1
-   go.opentelemetry.io/collector/semconv v0.108.1
-   go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0
-   go.opentelemetry.io/otel v1.29.0
-   go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0
-   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0
-   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0
-   go.opentelemetry.io/otel/sdk v1.29.0
-   go.opentelemetry.io/otel/trace v1.29.0
+   go.opentelemetry.io/collector/pdata v1.16.0
+   go.opentelemetry.io/collector/semconv v0.110.0
+   go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
+   go.opentelemetry.io/otel v1.30.0
+   go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0
+   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0
+   go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0
+   go.opentelemetry.io/otel/sdk v1.30.0
+   go.opentelemetry.io/otel/trace v1.30.0
    go.uber.org/atomic v1.11.0
-   go.uber.org/automaxprocs v1.5.3
+   go.uber.org/automaxprocs v1.6.0
    go.uber.org/goleak v1.3.0
    go.uber.org/multierr v1.11.0
    golang.org/x/oauth2 v0.23.0
@@ -81,9 +81,9 @@ require (
    golang.org/x/text v0.18.0
    golang.org/x/time v0.6.0
    golang.org/x/tools v0.24.0
-   google.golang.org/api v0.195.0
-   google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed
-   google.golang.org/grpc v1.66.0
+   google.golang.org/api v0.199.0
+   google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
+   google.golang.org/grpc v1.67.0
    google.golang.org/protobuf v1.34.2
    gopkg.in/yaml.v2 v2.4.0
    gopkg.in/yaml.v3 v3.0.1
@@ -95,9 +95,9 @@ require (
)

require (
-   cloud.google.com/go/auth v0.9.3 // indirect
+   cloud.google.com/go/auth v0.9.5 // indirect
    cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
-   cloud.google.com/go/compute/metadata v0.5.0 // indirect
+   cloud.google.com/go/compute/metadata v0.5.2 // indirect
    github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
    github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
    github.com/Microsoft/go-winio v0.6.1 // indirect
@@ -106,7 +106,7 @@ require (
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
    github.com/cilium/ebpf v0.11.0 // indirect
-   github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect
+   github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect
    github.com/containerd/cgroups/v3 v3.0.3 // indirect
    github.com/containerd/log v0.1.0 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@@ -133,7 +133,7 @@ require (
    github.com/go-resty/resty/v2 v2.13.1 // indirect
    github.com/godbus/dbus/v5 v5.0.4 // indirect
    github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-   github.com/golang/glog v1.2.1 // indirect
+   github.com/golang/glog v1.2.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
@@ -188,13 +188,13 @@ require (
    github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
    go.mongodb.org/mongo-driver v1.14.0 // indirect
    go.opencensus.io v0.24.0 // indirect
-   go.opentelemetry.io/otel/metric v1.29.0 // indirect
+   go.opentelemetry.io/otel/metric v1.30.0 // indirect
    go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-   golang.org/x/crypto v0.26.0 // indirect
+   golang.org/x/crypto v0.27.0 // indirect
    golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
    golang.org/x/mod v0.20.0 // indirect
-   golang.org/x/net v0.28.0 // indirect
-   golang.org/x/term v0.23.0 // indirect
+   golang.org/x/net v0.29.0 // indirect
+   golang.org/x/term v0.24.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect

go.sum

@@ -12,8 +12,8 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U=
-cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk=
+cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
+cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
@@ -22,8 +22,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
+cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
+cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -120,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@@ -251,8 +251,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
-github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -472,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI=
-github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM=
+github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
+github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -732,34 +732,34 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk=
-go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8=
-go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4=
-go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
-go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc=
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
-go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
-go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
-go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
-go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto=
+go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4=
+go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk=
+go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
+go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
+go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8=
+go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
+go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
+go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE=
+go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg=
+go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
+go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -782,8 +782,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
-golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -865,8 +865,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -963,8 +963,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1056,8 +1056,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.195.0 h1:Ude4N8FvTKnnQJHU48RFI40jOBgIrL8Zqr3/QeST6yU= google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
google.golang.org/api v0.195.0/go.mod h1:DOGRWuv3P8TU8Lnz7uQc4hyNqrBpMtD9ppW3wBJurgc= google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1094,8 +1094,8 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@ -1116,8 +1116,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw=
google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
View file
@ -85,9 +85,8 @@ func TestParseFileFailure(t *testing.T) {
for _, c := range table { for _, c := range table {
_, errs := ParseFile(filepath.Join("testdata", c.filename)) _, errs := ParseFile(filepath.Join("testdata", c.filename))
require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename) require.NotEmpty(t, errs, "Expected error parsing %s but got none", c.filename)
require.Error(t, errs[0]) require.ErrorContainsf(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
require.Containsf(t, errs[0].Error(), c.errMsg, "Expected error for %s.", c.filename)
} }
} }
@ -259,8 +258,7 @@ func TestError(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got := tt.error.Error() require.EqualError(t, tt.error, tt.want)
require.Equal(t, tt.want, got)
}) })
} }
} }
@ -308,8 +306,7 @@ func TestWrappedError(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got := tt.wrappedError.Error() require.EqualError(t, tt.wrappedError, tt.want)
require.Equal(t, tt.want, got)
}) })
} }
} }
View file
@ -0,0 +1,177 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package textparse
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"testing"
"github.com/prometheus/prometheus/model/exemplar"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
)
type newParser func([]byte, *labels.SymbolTable) Parser
var newTestParserFns = map[string]newParser{
"promtext": NewPromParser,
"promproto": func(b []byte, st *labels.SymbolTable) Parser {
return NewProtobufParser(b, true, st)
},
"omtext": func(b []byte, st *labels.SymbolTable) Parser {
return NewOpenMetricsParser(b, st, WithOMParserCTSeriesSkipped())
},
}
// BenchmarkParse benchmarks parsing, mimicking how scrape/scrape.go#append uses it.
// Typically used as follows:
/*
export bench=v1 && go test ./model/textparse/... \
-run '^$' -bench '^BenchmarkParse' \
-benchtime 2s -count 6 -cpu 2 -benchmem -timeout 999m \
| tee ${bench}.txt
*/
// For profiles, add -memprofile=${bench}.mem.pprof -cpuprofile=${bench}.cpu.pprof
// options.
//
// NOTE(bwplotka): Previous iterations of this benchmark had different cases for isolated
// Series, Series+Metrics with and without reuse, and Series+CT. Those cases are
// sometimes useful when working on a particular optimization, but it does not
// make sense to persist them for everybody (e.g. for CI one day).
// For local iteration, feel free to adjust the cases or comment out code, etc.
//
// NOTE(bwplotka): Do not try to conclude "what parser (OM, proto, prom) is the fastest"
// as the testdata has different amounts and types of metrics and features (e.g. exemplars).
func BenchmarkParse(b *testing.B) {
for _, bcase := range []struct {
dataFile string // Localized to "./testdata".
dataProto []byte
parser string
compareToExpfmtFormat expfmt.FormatType
}{
{dataFile: "promtestdata.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain},
{dataFile: "promtestdata.nometa.txt", parser: "promtext", compareToExpfmtFormat: expfmt.TypeTextPlain},
// We don't pass compareToExpfmtFormat: expfmt.TypeProtoDelim as expfmt does not support GAUGE_HISTOGRAM, see https://github.com/prometheus/common/issues/430.
{dataProto: createTestProtoBuf(b).Bytes(), parser: "promproto"},
// We don't pass compareToExpfmtFormat: expfmt.TypeOpenMetrics as expfmt does not support OM exemplars, see https://github.com/prometheus/common/issues/703.
{dataFile: "omtestdata.txt", parser: "omtext"},
{dataFile: "promtestdata.txt", parser: "omtext"}, // Compare how omtext parser deals with Prometheus text format vs promtext.
} {
var buf []byte
dataCase := bcase.dataFile
if len(bcase.dataProto) > 0 {
dataCase = "createTestProtoBuf()"
buf = bcase.dataProto
} else {
f, err := os.Open(filepath.Join("testdata", bcase.dataFile))
require.NoError(b, err)
b.Cleanup(func() {
_ = f.Close()
})
buf, err = io.ReadAll(f)
require.NoError(b, err)
}
b.Run(fmt.Sprintf("data=%v/parser=%v", dataCase, bcase.parser), func(b *testing.B) {
newParserFn := newTestParserFns[bcase.parser]
var (
res labels.Labels
e exemplar.Exemplar
)
b.SetBytes(int64(len(buf)))
b.ReportAllocs()
b.ResetTimer()
st := labels.NewSymbolTable()
for i := 0; i < b.N; i++ {
p := newParserFn(buf, st)
Inner:
for {
t, err := p.Next()
switch t {
case EntryInvalid:
if errors.Is(err, io.EOF) {
break Inner
}
b.Fatal(err)
case EntryType:
_, _ = p.Type()
continue
case EntryHelp:
_, _ = p.Help()
continue
case EntryUnit:
_, _ = p.Unit()
continue
case EntryComment:
continue
case EntryHistogram:
_, _, _, _ = p.Histogram()
case EntrySeries:
_, _, _ = p.Series()
default:
b.Fatal("not implemented entry", t)
}
_ = p.Metric(&res)
_ = p.CreatedTimestamp()
for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
}
}
}
})
b.Run(fmt.Sprintf("data=%v/parser=xpfmt", dataCase), func(b *testing.B) {
if bcase.compareToExpfmtFormat == expfmt.TypeUnknown {
b.Skip("compareToExpfmtFormat not set")
}
b.SetBytes(int64(len(buf)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
decSamples := make(model.Vector, 0, 50)
sdec := expfmt.SampleDecoder{
Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(bcase.compareToExpfmtFormat)),
Opts: &expfmt.DecodeOptions{
Timestamp: model.TimeFromUnixNano(0),
},
}
for {
if err := sdec.Decode(&decSamples); err != nil {
if errors.Is(err, io.EOF) {
break
}
b.Fatal(err)
}
decSamples = decSamples[:0]
}
}
})
}
}
View file
@ -80,7 +80,7 @@ type Parser interface {
// //
// This function always returns a valid parser, but might additionally // This function always returns a valid parser, but might additionally
// return an error if the content type cannot be parsed. // return an error if the content type cannot be parsed.
func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { func New(b []byte, contentType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) {
if contentType == "" { if contentType == "" {
return NewPromParser(b, st), nil return NewPromParser(b, st), nil
} }
@ -91,7 +91,9 @@ func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.S
} }
switch mediaType { switch mediaType {
case "application/openmetrics-text": case "application/openmetrics-text":
return NewOpenMetricsParser(b, st), nil return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) {
o.SkipCTSeries = skipOMCTSeries
}), nil
case "application/vnd.google.protobuf": case "application/vnd.google.protobuf":
return NewProtobufParser(b, parseClassicHistograms, st), nil return NewProtobufParser(b, parseClassicHistograms, st), nil
default: default:
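For callers, a minimal sketch of the updated factory signature (the scrape body, content type, and flag values below are illustrative, not taken from this change):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	// Hypothetical OpenMetrics scrape body.
	buf := []byte("# TYPE foo counter\nfoo_total 17.0\nfoo_created 1520872607.123\n# EOF\n")
	st := labels.NewSymbolTable()
	// parseClassicHistograms=false, skipOMCTSeries=true: _created series are not
	// returned as regular samples and are surfaced via CreatedTimestamp instead.
	p, err := textparse.New(buf, "application/openmetrics-text", false, true, st)
	if err != nil {
		fmt.Println("unparsable content type:", err) // A valid parser is still returned.
	}
	_ = p
}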
View file
@ -93,13 +93,12 @@ func TestNewParser(t *testing.T) {
tt := tt // Copy to local variable before going parallel. tt := tt // Copy to local variable before going parallel.
t.Parallel() t.Parallel()
p, err := New([]byte{}, tt.contentType, false, labels.NewSymbolTable()) p, err := New([]byte{}, tt.contentType, false, false, labels.NewSymbolTable())
tt.validateParser(t, p) tt.validateParser(t, p)
if tt.err == "" { if tt.err == "" {
require.NoError(t, err) require.NoError(t, err)
} else { } else {
require.Error(t, err) require.ErrorContains(t, err, tt.err)
require.Contains(t, err.Error(), tt.err)
} }
}) })
} }
View file
@ -95,6 +95,12 @@ type OpenMetricsParser struct {
exemplarTs int64 exemplarTs int64
hasExemplarTs bool hasExemplarTs bool
// Created timestamp parsing state.
ct int64
ctHashSet uint64
// visitedName is the metric name of the last visited metric when peeking ahead
// for _created series during the execution of the CreatedTimestamp method.
visitedName string
skipCTSeries bool skipCTSeries bool
} }
@ -254,6 +260,9 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
func (p *OpenMetricsParser) CreatedTimestamp() *int64 { func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
if !typeRequiresCT(p.mtype) { if !typeRequiresCT(p.mtype) {
// Not a CT supported metric type, fast path. // Not a CT supported metric type, fast path.
p.ct = 0
p.visitedName = ""
p.ctHashSet = 0
return nil return nil
} }
@ -264,27 +273,44 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
) )
p.Metric(&currLset) p.Metric(&currLset)
currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
// Search for the _created line for the currFamilyLsetHash using ephemeral parser until currName := currLset.Get(model.MetricNameLabel)
// we see EOF or new metric family. We have to do it as we don't know where (and if) currName = findBaseMetricName(currName)
// that CT line is.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. // Make sure we are on a new metric before returning the cached CT.
peek := deepCopy(p) if currName == p.visitedName && currFamilyLsetHash == p.ctHashSet && p.visitedName != "" && p.ctHashSet > 0 && p.ct > 0 {
// CT is already known, fast path.
return &p.ct
}
// Create a new lexer to reset the parser once this function is done executing.
resetLexer := &openMetricsLexer{
b: p.l.b,
i: p.l.i,
start: p.l.start,
err: p.l.err,
state: p.l.state,
}
p.skipCTSeries = false
for { for {
eType, err := peek.Next() eType, err := p.Next()
if err != nil { if err != nil {
// This means peek will give error too later on, so def no CT line found. // This means p.Next() will give an error later on too, so definitely no CT line was found.
// This might result in partial scrape with wrong/missing CT, but only // This might result in partial scrape with wrong/missing CT, but only
// spec improvement would help. // spec improvement would help.
// TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
p.resetCTParseValues(resetLexer)
return nil return nil
} }
if eType != EntrySeries { if eType != EntrySeries {
// Assume we hit different family, no CT line found. // Assume we hit different family, no CT line found.
p.resetCTParseValues(resetLexer)
return nil return nil
} }
var peekedLset labels.Labels var peekedLset labels.Labels
peek.Metric(&peekedLset) p.Metric(&peekedLset)
peekedName := peekedLset.Get(model.MetricNameLabel) peekedName := peekedLset.Get(model.MetricNameLabel)
if !strings.HasSuffix(peekedName, "_created") { if !strings.HasSuffix(peekedName, "_created") {
// Not a CT line, search more. // Not a CT line, search more.
@ -294,14 +320,52 @@ func (p *OpenMetricsParser) CreatedTimestamp() *int64 {
// We got a CT line here, but let's search if CT line is actually for our series, edge case. // We got a CT line here, but let's search if CT line is actually for our series, edge case.
peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile")
if peekWithoutNameLsetHash != currFamilyLsetHash { if peekWithoutNameLsetHash != currFamilyLsetHash {
// CT line for a different series, for our series no CT. // Found a CT line for a different series; our series has no CT.
p.resetCTParseValues(resetLexer)
return nil return nil
} }
ct := int64(peek.val)
// All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds.
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps
ct := int64(p.val * 1000.0)
p.setCTParseValues(ct, currFamilyLsetHash, currName, true, resetLexer)
return &ct return &ct
} }
} }
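A worked instance of the conversion above, using the foo_created value that appears in the testdata later in this change:

package main

import "fmt"

func main() {
	val := 1520872607.123     // foo_created value, Unix seconds
	ct := int64(val * 1000.0) // same conversion as in CreatedTimestamp
	fmt.Println(ct)           // 1520872607123, Unix milliseconds
}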
// setCTParseValues sets the parser to the state after the CreatedTimestamp method was called and a CT was found.
// This is useful to prevent re-parsing the same series and to return the cached CT value early.
func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, visitedName string, skipCTSeries bool, resetLexer *openMetricsLexer) {
p.ct = ct
p.l = resetLexer
p.ctHashSet = ctHashSet
p.visitedName = visitedName
p.skipCTSeries = skipCTSeries
}
// resetCTParseValues resets the parser to the state before the CreatedTimestamp method was called.
func (p *OpenMetricsParser) resetCTParseValues(resetLexer *openMetricsLexer) {
p.l = resetLexer
p.ct = 0
p.ctHashSet = 0
p.visitedName = ""
p.skipCTSeries = true
}
// findBaseMetricName returns the metric name without reserved suffixes such as "_created",
// "_sum", etc. based on the OpenMetrics specification found at
// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md.
// If no suffix is found, the original name is returned.
func findBaseMetricName(name string) string {
suffixes := []string{"_created", "_count", "_sum", "_bucket", "_total", "_gcount", "_gsum", "_info"}
for _, suffix := range suffixes {
if strings.HasSuffix(name, suffix) {
return strings.TrimSuffix(name, suffix)
}
}
return name
}
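For illustration, a few calls with hypothetical metric names (a sketch relying on findBaseMetricName above):

findBaseMetricName("foo_created")        // "foo"
findBaseMetricName("something_bucket")   // "something"
findBaseMetricName("rpc_requests_total") // "rpc_requests"
findBaseMetricName("fizz")               // "fizz" (no reserved suffix, returned unchanged)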
// typeRequiresCT returns true if the metric type requires a _created timestamp. // typeRequiresCT returns true if the metric type requires a _created timestamp.
func typeRequiresCT(t model.MetricType) bool { func typeRequiresCT(t model.MetricType) bool {
switch t { switch t {
@ -312,29 +376,6 @@ func typeRequiresCT(t model.MetricType) bool {
} }
} }
// deepCopy creates a copy of a parser without re-using the slices' original memory addresses.
func deepCopy(p *OpenMetricsParser) OpenMetricsParser {
newB := make([]byte, len(p.l.b))
copy(newB, p.l.b)
newLexer := &openMetricsLexer{
b: newB,
i: p.l.i,
start: p.l.start,
err: p.l.err,
state: p.l.state,
}
newParser := OpenMetricsParser{
l: newLexer,
builder: p.builder,
mtype: p.mtype,
val: p.val,
skipCTSeries: false,
}
return newParser
}
// nextToken returns the next token from the openMetricsLexer. // nextToken returns the next token from the openMetricsLexer.
func (p *OpenMetricsParser) nextToken() token { func (p *OpenMetricsParser) nextToken() token {
tok := p.l.Lex() tok := p.l.Lex()
View file
@ -69,26 +69,47 @@ testmetric{label="\"bar\""} 1
# HELP foo Counter with and without labels to certify CT is parsed for both cases # HELP foo Counter with and without labels to certify CT is parsed for both cases
# TYPE foo counter # TYPE foo counter
foo_total 17.0 1520879607.789 # {id="counter-test"} 5 foo_total 17.0 1520879607.789 # {id="counter-test"} 5
foo_created 1000 foo_created 1520872607.123
foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5 foo_total{a="b"} 17.0 1520879607.789 # {id="counter-test"} 5
foo_created{a="b"} 1000 foo_created{a="b"} 1520872607.123
# HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines afar # HELP bar Summary with CT at the end, making sure we find CT even if it's multiple lines afar
# TYPE bar summary # TYPE bar summary
bar_count 17.0 bar_count 17.0
bar_sum 324789.3 bar_sum 324789.3
bar{quantile="0.95"} 123.7 bar{quantile="0.95"} 123.7
bar{quantile="0.99"} 150.0 bar{quantile="0.99"} 150.0
bar_created 1520430000 bar_created 1520872608.124
# HELP baz Histogram with the same objective as above's summary # HELP baz Histogram with the same objective as above's summary
# TYPE baz histogram # TYPE baz histogram
baz_bucket{le="0.0"} 0 baz_bucket{le="0.0"} 0
baz_bucket{le="+Inf"} 17 baz_bucket{le="+Inf"} 17
baz_count 17 baz_count 17
baz_sum 324789.3 baz_sum 324789.3
baz_created 1520430000 baz_created 1520872609.125
# HELP fizz_created Gauge which shouldn't be parsed as CT # HELP fizz_created Gauge which shouldn't be parsed as CT
# TYPE fizz_created gauge # TYPE fizz_created gauge
fizz_created 17.0` fizz_created 17.0
# HELP something Histogram with _created between buckets and summary
# TYPE something histogram
something_count 18
something_sum 324789.4
something_created 1520430001
something_bucket{le="0.0"} 1
something_bucket{le="+Inf"} 18
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 20
yum_sum 324789.5
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_count 21
foobar_created 1520430004
foobar_sum 324789.6
foobar{quantile="0.95"} 123.8
foobar{quantile="0.99"} 150.1`
input += "\n# HELP metric foo\x00bar" input += "\n# HELP metric foo\x00bar"
input += "\nnull_byte_metric{a=\"abc\x00\"} 1" input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
@ -250,14 +271,14 @@ fizz_created 17.0`
lset: labels.FromStrings("__name__", "foo_total"), lset: labels.FromStrings("__name__", "foo_total"),
t: int64p(1520879607789), t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000), ct: int64p(1520872607123),
}, { }, {
m: `foo_total{a="b"}`, m: `foo_total{a="b"}`,
v: 17.0, v: 17.0,
lset: labels.FromStrings("__name__", "foo_total", "a", "b"), lset: labels.FromStrings("__name__", "foo_total", "a", "b"),
t: int64p(1520879607789), t: int64p(1520879607789),
e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5}, e: &exemplar.Exemplar{Labels: labels.FromStrings("id", "counter-test"), Value: 5},
ct: int64p(1000), ct: int64p(1520872607123),
}, { }, {
m: "bar", m: "bar",
help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far", help: "Summary with CT at the end, making sure we find CT even if it's multiple lines a far",
@ -268,22 +289,22 @@ fizz_created 17.0`
m: "bar_count", m: "bar_count",
v: 17.0, v: 17.0,
lset: labels.FromStrings("__name__", "bar_count"), lset: labels.FromStrings("__name__", "bar_count"),
ct: int64p(1520430000), ct: int64p(1520872608124),
}, { }, {
m: "bar_sum", m: "bar_sum",
v: 324789.3, v: 324789.3,
lset: labels.FromStrings("__name__", "bar_sum"), lset: labels.FromStrings("__name__", "bar_sum"),
ct: int64p(1520430000), ct: int64p(1520872608124),
}, { }, {
m: `bar{quantile="0.95"}`, m: `bar{quantile="0.95"}`,
v: 123.7, v: 123.7,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"), lset: labels.FromStrings("__name__", "bar", "quantile", "0.95"),
ct: int64p(1520430000), ct: int64p(1520872608124),
}, { }, {
m: `bar{quantile="0.99"}`, m: `bar{quantile="0.99"}`,
v: 150.0, v: 150.0,
lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"), lset: labels.FromStrings("__name__", "bar", "quantile", "0.99"),
ct: int64p(1520430000), ct: int64p(1520872608124),
}, { }, {
m: "baz", m: "baz",
help: "Histogram with the same objective as above's summary", help: "Histogram with the same objective as above's summary",
@ -294,22 +315,22 @@ fizz_created 17.0`
m: `baz_bucket{le="0.0"}`, m: `baz_bucket{le="0.0"}`,
v: 0, v: 0,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"), lset: labels.FromStrings("__name__", "baz_bucket", "le", "0.0"),
ct: int64p(1520430000), ct: int64p(1520872609125),
}, { }, {
m: `baz_bucket{le="+Inf"}`, m: `baz_bucket{le="+Inf"}`,
v: 17, v: 17,
lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"), lset: labels.FromStrings("__name__", "baz_bucket", "le", "+Inf"),
ct: int64p(1520430000), ct: int64p(1520872609125),
}, { }, {
m: `baz_count`, m: `baz_count`,
v: 17, v: 17,
lset: labels.FromStrings("__name__", "baz_count"), lset: labels.FromStrings("__name__", "baz_count"),
ct: int64p(1520430000), ct: int64p(1520872609125),
}, { }, {
m: `baz_sum`, m: `baz_sum`,
v: 324789.3, v: 324789.3,
lset: labels.FromStrings("__name__", "baz_sum"), lset: labels.FromStrings("__name__", "baz_sum"),
ct: int64p(1520430000), ct: int64p(1520872609125),
}, { }, {
m: "fizz_created", m: "fizz_created",
help: "Gauge which shouldn't be parsed as CT", help: "Gauge which shouldn't be parsed as CT",
@ -320,6 +341,84 @@ fizz_created 17.0`
m: `fizz_created`, m: `fizz_created`,
v: 17, v: 17,
lset: labels.FromStrings("__name__", "fizz_created"), lset: labels.FromStrings("__name__", "fizz_created"),
}, {
m: "something",
help: "Histogram with _created between buckets and summary",
}, {
m: "something",
typ: model.MetricTypeHistogram,
}, {
m: `something_count`,
v: 18,
lset: labels.FromStrings("__name__", "something_count"),
ct: int64p(1520430001000),
}, {
m: `something_sum`,
v: 324789.4,
lset: labels.FromStrings("__name__", "something_sum"),
ct: int64p(1520430001000),
}, {
m: `something_bucket{le="0.0"}`,
v: 1,
lset: labels.FromStrings("__name__", "something_bucket", "le", "0.0"),
ct: int64p(1520430001000),
}, {
m: `something_bucket{le="+Inf"}`,
v: 18,
lset: labels.FromStrings("__name__", "something_bucket", "le", "+Inf"),
ct: int64p(1520430001000),
}, {
m: "yum",
help: "Summary with _created between sum and quantiles",
}, {
m: "yum",
typ: model.MetricTypeSummary,
}, {
m: `yum_count`,
v: 20,
lset: labels.FromStrings("__name__", "yum_count"),
ct: int64p(1520430003000),
}, {
m: `yum_sum`,
v: 324789.5,
lset: labels.FromStrings("__name__", "yum_sum"),
ct: int64p(1520430003000),
}, {
m: `yum{quantile="0.95"}`,
v: 123.7,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.95"),
ct: int64p(1520430003000),
}, {
m: `yum{quantile="0.99"}`,
v: 150.0,
lset: labels.FromStrings("__name__", "yum", "quantile", "0.99"),
ct: int64p(1520430003000),
}, {
m: "foobar",
help: "Summary with _created as the first line",
}, {
m: "foobar",
typ: model.MetricTypeSummary,
}, {
m: `foobar_count`,
v: 21,
lset: labels.FromStrings("__name__", "foobar_count"),
ct: int64p(1520430004000),
}, {
m: `foobar_sum`,
v: 324789.6,
lset: labels.FromStrings("__name__", "foobar_sum"),
ct: int64p(1520430004000),
}, {
m: `foobar{quantile="0.95"}`,
v: 123.8,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.95"),
ct: int64p(1520430004000),
}, {
m: `foobar{quantile="0.99"}`,
v: 150.1,
lset: labels.FromStrings("__name__", "foobar", "quantile", "0.99"),
ct: int64p(1520430004000),
}, { }, {
m: "metric", m: "metric",
help: "foo\x00bar", help: "foo\x00bar",
@ -346,7 +445,7 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
# UNIT "go.gc_duration_seconds" seconds # UNIT "go.gc_duration_seconds" seconds
{"go.gc_duration_seconds",quantile="0"} 4.9351e-05 {"go.gc_duration_seconds",quantile="0"} 4.9351e-05
{"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05 {"go.gc_duration_seconds",quantile="0.25"} 7.424100000000001e-05
{"go.gc_duration_seconds_created"} 12313 {"go.gc_duration_seconds_created"} 1520872607.123
{"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05 {"go.gc_duration_seconds",quantile="0.5",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05
{"http.status",q="0.9",a="b"} 8.3835e-05 {"http.status",q="0.9",a="b"} 8.3835e-05
@ -370,12 +469,12 @@ func TestUTF8OpenMetricsParse(t *testing.T) {
m: `{"go.gc_duration_seconds",quantile="0"}`, m: `{"go.gc_duration_seconds",quantile="0"}`,
v: 4.9351e-05, v: 4.9351e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"), lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0"),
ct: int64p(12313), ct: int64p(1520872607123),
}, { }, {
m: `{"go.gc_duration_seconds",quantile="0.25"}`, m: `{"go.gc_duration_seconds",quantile="0.25"}`,
v: 7.424100000000001e-05, v: 7.424100000000001e-05,
lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"), lset: labels.FromStrings("__name__", "go.gc_duration_seconds", "quantile", "0.25"),
ct: int64p(12313), ct: int64p(1520872607123),
}, { }, {
m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`, m: `{"go.gc_duration_seconds",quantile="0.5",a="b"}`,
v: 8.3835e-05, v: 8.3835e-05,
@ -699,12 +798,12 @@ func TestOpenMetricsParseErrors(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
var err error var err error
for err == nil { for err == nil {
_, err = p.Next() _, err = p.Next()
} }
require.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input) require.EqualError(t, err, c.err, "test %d: %s", i, c.input)
} }
} }
@ -764,18 +863,18 @@ func TestOMNullByteHandling(t *testing.T) {
} }
for i, c := range cases { for i, c := range cases {
p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable()) p := NewOpenMetricsParser([]byte(c.input), labels.NewSymbolTable(), WithOMParserCTSeriesSkipped())
var err error var err error
for err == nil { for err == nil {
_, err = p.Next() _, err = p.Next()
} }
if c.err == "" { if c.err == "" {
require.Equal(t, io.EOF, err, "test %d", i) require.ErrorIs(t, err, io.EOF, "test %d", i)
continue continue
} }
require.Equal(t, c.err, err.Error(), "test %d", i) require.EqualError(t, err, c.err, "test %d", i)
} }
} }
@ -783,34 +882,13 @@ func TestOMNullByteHandling(t *testing.T) {
// these tests show them. // these tests show them.
// TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. // TODO(maniktherana): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this.
func TestCTParseFailures(t *testing.T) { func TestCTParseFailures(t *testing.T) {
input := `# HELP something Histogram with _created between buckets and summary input := `# HELP thing Histogram with _created as the first line
# TYPE something histogram
something_count 17
something_sum 324789.3
something_created 1520430001
something_bucket{le="0.0"} 0
something_bucket{le="+Inf"} 17
# HELP thing Histogram with _created as first line
# TYPE thing histogram # TYPE thing histogram
thing_created 1520430002 thing_created 1520872607.123
thing_count 17 thing_count 17
thing_sum 324789.3 thing_sum 324789.3
thing_bucket{le="0.0"} 0 thing_bucket{le="0.0"} 0
thing_bucket{le="+Inf"} 17 thing_bucket{le="+Inf"} 17`
# HELP yum Summary with _created between sum and quantiles
# TYPE yum summary
yum_count 17.0
yum_sum 324789.3
yum_created 1520430003
yum{quantile="0.95"} 123.7
yum{quantile="0.99"} 150.0
# HELP foobar Summary with _created as the first line
# TYPE foobar summary
foobar_created 1520430004
foobar_count 17.0
foobar_sum 324789.3
foobar{quantile="0.95"} 123.7
foobar{quantile="0.99"} 150.0`
input += "\n# EOF\n" input += "\n# EOF\n"
@ -826,30 +904,6 @@ foobar{quantile="0.99"} 150.0`
exp := []expectCT{ exp := []expectCT{
{ {
m: "something",
help: "Histogram with _created between buckets and summary",
isErr: false,
}, {
m: "something",
typ: model.MetricTypeHistogram,
isErr: false,
}, {
m: `something_count`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_sum`,
ct: int64p(1520430001),
isErr: false,
}, {
m: `something_bucket{le="0.0"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: `something_bucket{le="+Inf"}`,
ct: int64p(1520430001),
isErr: true,
}, {
m: "thing", m: "thing",
help: "Histogram with _created as first line", help: "Histogram with _created as first line",
isErr: false, isErr: false,
@ -859,67 +913,19 @@ foobar{quantile="0.99"} 150.0`
isErr: false, isErr: false,
}, { }, {
m: `thing_count`, m: `thing_count`,
ct: int64p(1520430002), ct: int64p(1520872607123),
isErr: true, isErr: true,
}, { }, {
m: `thing_sum`, m: `thing_sum`,
ct: int64p(1520430002), ct: int64p(1520872607123),
isErr: true, isErr: true,
}, { }, {
m: `thing_bucket{le="0.0"}`, m: `thing_bucket{le="0.0"}`,
ct: int64p(1520430002), ct: int64p(1520872607123),
isErr: true, isErr: true,
}, { }, {
m: `thing_bucket{le="+Inf"}`, m: `thing_bucket{le="+Inf"}`,
ct: int64p(1520430002), ct: int64p(1520872607123),
isErr: true,
}, {
m: "yum",
help: "Summary with _created between summary and quantiles",
isErr: false,
}, {
m: "yum",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "yum_count",
ct: int64p(1520430003),
isErr: false,
}, {
m: "yum_sum",
ct: int64p(1520430003),
isErr: false,
}, {
m: `yum{quantile="0.95"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: `yum{quantile="0.99"}`,
ct: int64p(1520430003),
isErr: true,
}, {
m: "foobar",
help: "Summary with _created as the first line",
isErr: false,
}, {
m: "foobar",
typ: model.MetricTypeSummary,
isErr: false,
}, {
m: "foobar_count",
ct: int64p(1520430004),
isErr: true,
}, {
m: "foobar_sum",
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.95"}`,
ct: int64p(1520430004),
isErr: true,
}, {
m: `foobar{quantile="0.99"}`,
ct: int64p(1520430004),
isErr: true, isErr: true,
}, },
} }
@ -951,44 +957,3 @@ foobar{quantile="0.99"} 150.0`
i++ i++
} }
} }
func TestDeepCopy(t *testing.T) {
input := []byte(`# HELP go_goroutines A gauge goroutines.
# TYPE go_goroutines gauge
go_goroutines 33 123.123
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds
go_gc_duration_seconds_created`)
st := labels.NewSymbolTable()
parser := NewOpenMetricsParser(input, st, WithOMParserCTSeriesSkipped()).(*OpenMetricsParser)
// Modify the original parser state
_, err := parser.Next()
require.NoError(t, err)
require.Equal(t, "go_goroutines", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.True(t, parser.skipCTSeries)
// Create a deep copy of the parser
copyParser := deepCopy(parser)
etype, err := copyParser.Next()
require.NoError(t, err)
require.Equal(t, EntryType, etype)
require.True(t, parser.skipCTSeries)
require.False(t, copyParser.skipCTSeries)
// Modify the original parser further
parser.Next()
parser.Next()
parser.Next()
require.Equal(t, "go_gc_duration_seconds", string(parser.l.b[parser.offsets[0]:parser.offsets[1]]))
require.Equal(t, "summary", string(parser.mtype))
require.False(t, copyParser.skipCTSeries)
require.True(t, parser.skipCTSeries)
// Ensure the copy remains unchanged
copyParser.Next()
copyParser.Next()
require.Equal(t, "go_gc_duration_seconds", string(copyParser.l.b[copyParser.offsets[0]:copyParser.offsets[1]]))
require.False(t, copyParser.skipCTSeries)
}
View file
@ -14,17 +14,13 @@
package textparse package textparse
import ( import (
"bytes"
"errors" "errors"
"io" "io"
"os"
"strings" "strings"
"testing" "testing"
"github.com/klauspost/compress/gzip"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/exemplar"
@ -423,8 +419,7 @@ func TestPromParseErrors(t *testing.T) {
for err == nil { for err == nil {
_, err = p.Next() _, err = p.Next()
} }
require.Error(t, err) require.EqualError(t, err, c.err, "test %d", i)
require.Equal(t, c.err, err.Error(), "test %d", i)
} }
} }
@ -483,194 +478,6 @@ func TestPromNullByteHandling(t *testing.T) {
continue continue
} }
require.Error(t, err) require.EqualError(t, err, c.err, "test %d", i)
require.Equal(t, c.err, err.Error(), "test %d", i)
}
}
const (
promtestdataSampleCount = 410
)
func BenchmarkParse(b *testing.B) {
for parserName, parser := range map[string]func([]byte, *labels.SymbolTable) Parser{
"prometheus": NewPromParser,
"openmetrics": func(b []byte, st *labels.SymbolTable) Parser {
return NewOpenMetricsParser(b, st)
},
} {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
f, err := os.Open(fn)
require.NoError(b, err)
defer f.Close()
buf, err := io.ReadAll(f)
require.NoError(b, err)
b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) {
total := 0
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
b.ReportAllocs()
b.ResetTimer()
st := labels.NewSymbolTable()
for i := 0; i < b.N; i += promtestdataSampleCount {
p := parser(buf, st)
Outer:
for i < b.N {
t, err := p.Next()
switch t {
case EntryInvalid:
if errors.Is(err, io.EOF) {
break Outer
}
b.Fatal(err)
case EntrySeries:
m, _, _ := p.Series()
total += len(m)
i++
}
}
}
_ = total
})
b.Run(parserName+"/decode-metric/"+fn, func(b *testing.B) {
total := 0
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
b.ReportAllocs()
b.ResetTimer()
st := labels.NewSymbolTable()
for i := 0; i < b.N; i += promtestdataSampleCount {
p := parser(buf, st)
Outer:
for i < b.N {
t, err := p.Next()
switch t {
case EntryInvalid:
if errors.Is(err, io.EOF) {
break Outer
}
b.Fatal(err)
case EntrySeries:
m, _, _ := p.Series()
var res labels.Labels
p.Metric(&res)
total += len(m)
i++
}
}
}
_ = total
})
b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) {
total := 0
var res labels.Labels
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
b.ReportAllocs()
b.ResetTimer()
st := labels.NewSymbolTable()
for i := 0; i < b.N; i += promtestdataSampleCount {
p := parser(buf, st)
Outer:
for i < b.N {
t, err := p.Next()
switch t {
case EntryInvalid:
if errors.Is(err, io.EOF) {
break Outer
}
b.Fatal(err)
case EntrySeries:
m, _, _ := p.Series()
p.Metric(&res)
total += len(m)
i++
}
}
}
_ = total
})
b.Run("expfmt-text/"+fn, func(b *testing.B) {
if parserName != "prometheus" {
b.Skip()
}
b.SetBytes(int64(len(buf) / promtestdataSampleCount))
b.ReportAllocs()
b.ResetTimer()
total := 0
for i := 0; i < b.N; i += promtestdataSampleCount {
decSamples := make(model.Vector, 0, 50)
sdec := expfmt.SampleDecoder{
Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.NewFormat(expfmt.TypeTextPlain)),
Opts: &expfmt.DecodeOptions{
Timestamp: model.TimeFromUnixNano(0),
},
}
for {
if err = sdec.Decode(&decSamples); err != nil {
break
}
total += len(decSamples)
decSamples = decSamples[:0]
}
}
_ = total
})
}
}
}
func BenchmarkGzip(b *testing.B) {
for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
b.Run(fn, func(b *testing.B) {
f, err := os.Open(fn)
require.NoError(b, err)
defer f.Close()
var buf bytes.Buffer
gw := gzip.NewWriter(&buf)
n, err := io.Copy(gw, f)
require.NoError(b, err)
require.NoError(b, gw.Close())
gbuf, err := io.ReadAll(&buf)
require.NoError(b, err)
k := b.N / promtestdataSampleCount
b.ReportAllocs()
b.SetBytes(n / promtestdataSampleCount)
b.ResetTimer()
total := 0
for i := 0; i < k; i++ {
gr, err := gzip.NewReader(bytes.NewReader(gbuf))
require.NoError(b, err)
d, err := io.ReadAll(gr)
require.NoError(b, err)
require.NoError(b, gr.Close())
total += len(d)
}
_ = total
})
} }
} }
View file
@ -32,7 +32,9 @@ import (
dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" dto "github.com/prometheus/prometheus/prompb/io/prometheus/client"
) )
func createTestProtoBuf(t *testing.T) *bytes.Buffer { func createTestProtoBuf(t testing.TB) *bytes.Buffer {
t.Helper()
testMetricFamilies := []string{ testMetricFamilies := []string{
`name: "go_build_info" `name: "go_build_info"
help: "Build information about the main Go module." help: "Build information about the main Go module."
64
model/textparse/testdata/omtestdata.txt vendored Normal file
View file
@ -0,0 +1,64 @@
# HELP go_build_info Build information about the main Go module.
# TYPE go_build_info gauge
go_build_info{checksum="",path="",version=""} 1.0
# HELP promhttp_metric_handler_errors Total number of internal errors encountered by the promhttp metric handler.
# TYPE promhttp_metric_handler_errors counter
promhttp_metric_handler_errors_total{cause="encoding"} 0.0
promhttp_metric_handler_errors_created{cause="encoding"} 1.726839813016397e+09
promhttp_metric_handler_errors_total{cause="gathering"} 0.0
promhttp_metric_handler_errors_created{cause="gathering"} 1.726839813016395e+09
# HELP rpc_durations_histogram_seconds RPC latency distributions.
# TYPE rpc_durations_histogram_seconds histogram
rpc_durations_histogram_seconds_bucket{le="-0.00099"} 0
rpc_durations_histogram_seconds_bucket{le="-0.00089"} 0
rpc_durations_histogram_seconds_bucket{le="-0.0007899999999999999"} 0
rpc_durations_histogram_seconds_bucket{le="-0.0006899999999999999"} 0
rpc_durations_histogram_seconds_bucket{le="-0.0005899999999999998"} 0
rpc_durations_histogram_seconds_bucket{le="-0.0004899999999999998"} 0
rpc_durations_histogram_seconds_bucket{le="-0.0003899999999999998"} 0
rpc_durations_histogram_seconds_bucket{le="-0.0002899999999999998"} 3 # {dummyID="17783"} -0.0003825067330956884 1.7268398142239082e+09
rpc_durations_histogram_seconds_bucket{le="-0.0001899999999999998"} 5 # {dummyID="84741"} -0.00020178290006788965 1.726839814829977e+09
rpc_durations_histogram_seconds_bucket{le="-8.999999999999979e-05"} 5
rpc_durations_histogram_seconds_bucket{le="1.0000000000000216e-05"} 8 # {dummyID="19206"} -4.6156147425468016e-05 1.7268398151337721e+09
rpc_durations_histogram_seconds_bucket{le="0.00011000000000000022"} 9 # {dummyID="3974"} 9.528436760156754e-05 1.726839814526797e+09
rpc_durations_histogram_seconds_bucket{le="0.00021000000000000023"} 11 # {dummyID="29640"} 0.00017459624183458996 1.7268398139220061e+09
rpc_durations_histogram_seconds_bucket{le="0.0003100000000000002"} 15 # {dummyID="9818"} 0.0002791130914009552 1.7268398149821382e+09
rpc_durations_histogram_seconds_bucket{le="0.0004100000000000002"} 15
rpc_durations_histogram_seconds_bucket{le="0.0005100000000000003"} 15
rpc_durations_histogram_seconds_bucket{le="0.0006100000000000003"} 15
rpc_durations_histogram_seconds_bucket{le="0.0007100000000000003"} 15
rpc_durations_histogram_seconds_bucket{le="0.0008100000000000004"} 15
rpc_durations_histogram_seconds_bucket{le="0.0009100000000000004"} 15
rpc_durations_histogram_seconds_bucket{le="+Inf"} 15
rpc_durations_histogram_seconds_sum -8.452185437166741e-05
rpc_durations_histogram_seconds_count 15
rpc_durations_histogram_seconds_created 1.726839813016302e+09
# HELP rpc_durations_seconds RPC latency distributions.
# TYPE rpc_durations_seconds summary
rpc_durations_seconds{service="exponential",quantile="0.5"} 7.689368882420941e-07
rpc_durations_seconds{service="exponential",quantile="0.9"} 1.6537614174305048e-06
rpc_durations_seconds{service="exponential",quantile="0.99"} 2.0965499063061924e-06
rpc_durations_seconds_sum{service="exponential"} 2.0318666372575776e-05
rpc_durations_seconds_count{service="exponential"} 22
rpc_durations_seconds_created{service="exponential"} 1.7268398130168908e+09
rpc_durations_seconds{service="normal",quantile="0.5"} -5.066758674917046e-06
rpc_durations_seconds{service="normal",quantile="0.9"} 0.0002935723711788224
rpc_durations_seconds{service="normal",quantile="0.99"} 0.0003023094636293776
rpc_durations_seconds_sum{service="normal"} -8.452185437166741e-05
rpc_durations_seconds_count{service="normal"} 15
rpc_durations_seconds_created{service="normal"} 1.726839813016714e+09
rpc_durations_seconds{service="uniform",quantile="0.5"} 9.005014931474918e-05
rpc_durations_seconds{service="uniform",quantile="0.9"} 0.00017801230208182325
rpc_durations_seconds{service="uniform",quantile="0.99"} 0.00018641524538180192
rpc_durations_seconds_sum{service="uniform"} 0.0011666095700533677
rpc_durations_seconds_count{service="uniform"} 11
rpc_durations_seconds_created{service="uniform"} 1.72683981301684e+09
# HELP rpc_requests Total number of RPC requests received.
# TYPE rpc_requests counter
rpc_requests_total{service="exponential"} 22.0
rpc_requests_created{service="exponential"} 1.726839813016893e+09
rpc_requests_total{service="normal"} 15.0
rpc_requests_created{service="normal"} 1.726839813016717e+09
rpc_requests_total{service="uniform"} 11.0
rpc_requests_created{service="uniform"} 1.7268398130168471e+09
# EOF
View file
@ -770,6 +770,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
s.mtx.Lock() s.mtx.Lock()
defer s.mtx.Unlock() defer s.mtx.Unlock()
previousAms := s.ams
// Set new Alertmanagers and deduplicate them along their unique URL. // Set new Alertmanagers and deduplicate them along their unique URL.
s.ams = []alertmanager{} s.ams = []alertmanager{}
s.droppedAms = []alertmanager{} s.droppedAms = []alertmanager{}
@ -789,6 +790,17 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
seen[us] = struct{}{} seen[us] = struct{}{}
s.ams = append(s.ams, am) s.ams = append(s.ams, am)
} }
// Now remove counters for any removed Alertmanagers.
for _, am := range previousAms {
us := am.url().String()
if _, ok := seen[us]; ok {
continue
}
s.metrics.latency.DeleteLabelValues(us)
s.metrics.sent.DeleteLabelValues(us)
s.metrics.errors.DeleteLabelValues(us)
seen[us] = struct{}{}
}
} }
func postPath(pre string, v config.AlertmanagerAPIVersion) string { func postPath(pre string, v config.AlertmanagerAPIVersion) string {
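For context, a minimal sketch of the client_golang pattern used here to drop per-Alertmanager series once a target disappears from discovery (the metric name and URL are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	sent := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "notifications_sent_total"},
		[]string{"alertmanager"},
	)
	url := "http://am-1:9093/api/v2/alerts" // hypothetical Alertmanager URL
	sent.WithLabelValues(url).Inc()
	// Once the Alertmanager is gone from discovery, delete its child series so
	// the vector does not keep exporting a stale, never-updated counter.
	sent.DeleteLabelValues(url)
}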
View file
@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"math"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -29,11 +30,13 @@ import (
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/model/value"
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/promql/promqltest" "github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/stats"
"github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/teststorage"
@ -3781,3 +3784,115 @@ func TestRateAnnotations(t *testing.T) {
}) })
} }
} }
func TestHistogramRateWithFloatStaleness(t *testing.T) {
// Make a chunk with two normal histograms of the same value.
h1 := histogram.Histogram{
Schema: 2,
Count: 10,
Sum: 100,
PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}},
PositiveBuckets: []int64{100},
}
c1 := chunkenc.NewHistogramChunk()
app, err := c1.Appender()
require.NoError(t, err)
var (
newc chunkenc.Chunk
recoded bool
)
newc, recoded, app, err = app.AppendHistogram(nil, 0, h1.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
newc, recoded, _, err = app.AppendHistogram(nil, 10, h1.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
// Make a chunk with a single float stale marker.
c2 := chunkenc.NewXORChunk()
app, err = c2.Appender()
require.NoError(t, err)
app.Append(20, math.Float64frombits(value.StaleNaN))
// Make a chunk with two normal histograms that have zero value.
h2 := histogram.Histogram{
Schema: 2,
}
c3 := chunkenc.NewHistogramChunk()
app, err = c3.Appender()
require.NoError(t, err)
newc, recoded, app, err = app.AppendHistogram(nil, 30, h2.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
newc, recoded, _, err = app.AppendHistogram(nil, 40, h2.Copy(), false)
require.NoError(t, err)
require.False(t, recoded)
require.Nil(t, newc)
querier := storage.MockQuerier{
SelectMockFunction: func(_ bool, _ *storage.SelectHints, _ ...*labels.Matcher) storage.SeriesSet {
return &singleSeriesSet{
series: mockSeries{chunks: []chunkenc.Chunk{c1, c2, c3}, labelSet: []string{"__name__", "foo"}},
}
},
}
queriable := storage.MockQueryable{MockQuerier: &querier}
engine := promqltest.NewTestEngine(t, false, 0, promqltest.DefaultMaxSamplesPerQuery)
q, err := engine.NewInstantQuery(context.Background(), &queriable, nil, "rate(foo[40s])", timestamp.Time(45))
require.NoError(t, err)
defer q.Close()
res := q.Exec(context.Background())
require.NoError(t, res.Err)
vec, err := res.Vector()
require.NoError(t, err)
// Single sample result.
require.Len(t, vec, 1)
// The result is a histogram.
require.NotNil(t, vec[0].H)
// The result should be zero as the histogram has not increased, so the rate is zero.
require.Equal(t, 0.0, vec[0].H.Count)
require.Equal(t, 0.0, vec[0].H.Sum)
}
type singleSeriesSet struct {
series storage.Series
consumed bool
}
func (s *singleSeriesSet) Next() bool { c := s.consumed; s.consumed = true; return !c }
func (s singleSeriesSet) At() storage.Series { return s.series }
func (s singleSeriesSet) Err() error { return nil }
func (s singleSeriesSet) Warnings() annotations.Annotations { return nil }
type mockSeries struct {
chunks []chunkenc.Chunk
labelSet []string
}
func (s mockSeries) Labels() labels.Labels {
return labels.FromStrings(s.labelSet...)
}
func (s mockSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator {
iterables := []chunkenc.Iterator{}
for _, c := range s.chunks {
iterables = append(iterables, c.Iterator(nil))
}
return storage.ChainSampleIteratorFromIterators(it, iterables)
}
View file
@ -1514,11 +1514,6 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio
return matrix, ws return matrix, ws
} }
// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) ===
func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
panic("funcLabelReplace wrong implementation called")
}
// === Vector(s Scalar) (Vector, Annotations) === // === Vector(s Scalar) (Vector, Annotations) ===
func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return append(enh.Out, return append(enh.Out,
@ -1570,11 +1565,6 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions)
return matrix, ws return matrix, ws
} }
// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) ===
func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
panic("funcLabelReplace wrong implementation called")
}
// Common code for date related functions. // Common code for date related functions.
func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector {
if len(vals) == 0 { if len(vals) == 0 {
@ -1696,8 +1686,8 @@ var FunctionCalls = map[string]FunctionCall{
"idelta": funcIdelta, "idelta": funcIdelta,
"increase": funcIncrease, "increase": funcIncrease,
"irate": funcIrate, "irate": funcIrate,
"label_replace": funcLabelReplace, "label_replace": nil, // evalLabelReplace not called via this map.
"label_join": funcLabelJoin, "label_join": nil, // evalLabelJoin not called via this map.
"ln": funcLn, "ln": funcLn,
"log10": funcLog10, "log10": funcLog10,
"log2": funcLog2, "log2": funcLog2,

View file

@ -61,7 +61,7 @@ const (
var symbolTable = labels.NewSymbolTable() var symbolTable = labels.NewSymbolTable()
func fuzzParseMetricWithContentType(in []byte, contentType string) int { func fuzzParseMetricWithContentType(in []byte, contentType string) int {
p, warning := textparse.New(in, contentType, false, symbolTable) p, warning := textparse.New(in, contentType, false, false, symbolTable)
if warning != nil { if warning != nil {
// An invalid content type is being passed, which should not happen // An invalid content type is being passed, which should not happen
// in this context. // in this context.

View file

@ -29,7 +29,7 @@ func TestfuzzParseMetricWithContentTypePanicOnInvalid(t *testing.T) {
} else { } else {
err, ok := p.(error) err, ok := p.(error)
require.True(t, ok) require.True(t, ok)
require.Contains(t, err.Error(), "duplicate parameter name") require.ErrorContains(t, err, "duplicate parameter name")
} }
}() }()

View file

@ -3925,8 +3925,7 @@ func TestParseExpressions(t *testing.T) {
require.Equal(t, expected, expr, "error on input '%s'", test.input) require.Equal(t, expected, expr, "error on input '%s'", test.input)
} else { } else {
require.Error(t, err) require.ErrorContains(t, err, test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
var errorList ParseErrors var errorList ParseErrors
ok := errors.As(err, &errorList) ok := errors.As(err, &errorList)
@ -4468,7 +4467,7 @@ func TestRecoverParserError(t *testing.T) {
e := errors.New("custom error") e := errors.New("custom error")
defer func() { defer func() {
require.Equal(t, e.Error(), err.Error()) require.EqualError(t, err, e.Error())
}() }()
defer p.recover(&err) defer p.recover(&err)

View file

@ -47,8 +47,8 @@ import (
var ( var (
patSpace = regexp.MustCompile("[\t ]+") patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
) )
const ( const (
@ -322,6 +322,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
cmd.fail = true cmd.fail = true
case "warn": case "warn":
cmd.warn = true cmd.warn = true
case "info":
cmd.info = true
} }
for j := 1; i+1 < len(lines); j++ { for j := 1; i+1 < len(lines); j++ {
@ -601,7 +603,7 @@ type evalCmd struct {
line int line int
isRange bool // if false, instant query isRange bool // if false, instant query
fail, warn, ordered bool fail, warn, ordered, info bool
expectedFailMessage string expectedFailMessage string
expectedFailRegexp *regexp.Regexp expectedFailRegexp *regexp.Regexp
@ -1151,13 +1153,16 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq
if res.Err == nil && cmd.fail { if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
} }
countWarnings, _ := res.Warnings.CountWarningsAndInfo() countWarnings, countInfo := res.Warnings.CountWarningsAndInfo()
if !cmd.warn && countWarnings > 0 { if !cmd.warn && countWarnings > 0 {
return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings)
} }
if cmd.warn && countWarnings == 0 { if cmd.warn && countWarnings == 0 {
return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line)
} }
if cmd.info && countInfo == 0 {
return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value) err = cmd.compareResult(res.Value)
if err != nil { if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)

View file

@ -55,6 +55,10 @@ func (a nopAppender) AppendHistogram(storage.SeriesRef, labels.Labels, int64, *h
return 0, nil return 0, nil
} }
func (a nopAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
return 0, nil
}
func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { func (a nopAppender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
return 0, nil return 0, nil
} }
@ -78,6 +82,7 @@ func equalFloatSamples(a, b floatSample) bool {
} }
type histogramSample struct { type histogramSample struct {
metric labels.Labels
t int64 t int64
h *histogram.Histogram h *histogram.Histogram
fh *histogram.FloatHistogram fh *histogram.FloatHistogram
@ -146,7 +151,7 @@ func (a *collectResultAppender) AppendExemplar(ref storage.SeriesRef, l labels.L
func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
a.mtx.Lock() a.mtx.Lock()
defer a.mtx.Unlock() defer a.mtx.Unlock()
a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t}) a.pendingHistograms = append(a.pendingHistograms, histogramSample{h: h, fh: fh, t: t, metric: l})
if a.next == nil { if a.next == nil {
return 0, nil return 0, nil
} }
@ -154,6 +159,13 @@ func (a *collectResultAppender) AppendHistogram(ref storage.SeriesRef, l labels.
return a.next.AppendHistogram(ref, l, t, h, fh) return a.next.AppendHistogram(ref, l, t, h, fh)
} }
func (a *collectResultAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if h != nil {
return a.AppendHistogram(ref, l, ct, &histogram.Histogram{}, nil)
}
return a.AppendHistogram(ref, l, ct, nil, &histogram.FloatHistogram{})
}
func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { func (a *collectResultAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
a.mtx.Lock() a.mtx.Lock()
defer a.mtx.Unlock() defer a.mtx.Unlock()

View file

@ -71,7 +71,6 @@ func NewManager(o *Options, logger log.Logger, newScrapeFailureLogger func(strin
// Options are the configuration parameters to the scrape manager. // Options are the configuration parameters to the scrape manager.
type Options struct { type Options struct {
ExtraMetrics bool ExtraMetrics bool
NoDefaultPort bool
// Option used by downstream scraper users like OpenTelemetry Collector // Option used by downstream scraper users like OpenTelemetry Collector
// to help lookup metric metadata. Should be false for Prometheus. // to help lookup metric metadata. Should be false for Prometheus.
PassMetadataInContext bool PassMetadataInContext bool

View file

@ -14,6 +14,7 @@
package scrape package scrape
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
@ -30,17 +31,22 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/timestamppb"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
_ "github.com/prometheus/prometheus/discovery/file" _ "github.com/prometheus/prometheus/discovery/file"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/runutil" "github.com/prometheus/prometheus/util/runutil"
"github.com/prometheus/prometheus/util/testutil" "github.com/prometheus/prometheus/util/testutil"
) )
@ -54,7 +60,6 @@ func TestPopulateLabels(t *testing.T) {
cases := []struct { cases := []struct {
in labels.Labels in labels.Labels
cfg *config.ScrapeConfig cfg *config.ScrapeConfig
noDefaultPort bool
res labels.Labels res labels.Labels
resOrig labels.Labels resOrig labels.Labels
err string err string
@ -111,8 +116,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeTimeout: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second),
}, },
res: labels.FromMap(map[string]string{ res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:80", model.AddressLabel: "1.2.3.4",
model.InstanceLabel: "1.2.3.4:80", model.InstanceLabel: "1.2.3.4",
model.SchemeLabel: "http", model.SchemeLabel: "http",
model.MetricsPathLabel: "/custom", model.MetricsPathLabel: "/custom",
model.JobLabel: "custom-job", model.JobLabel: "custom-job",
@ -142,7 +147,7 @@ func TestPopulateLabels(t *testing.T) {
ScrapeTimeout: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second),
}, },
res: labels.FromMap(map[string]string{ res: labels.FromMap(map[string]string{
model.AddressLabel: "[::1]:443", model.AddressLabel: "[::1]",
model.InstanceLabel: "custom-instance", model.InstanceLabel: "custom-instance",
model.SchemeLabel: "https", model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics", model.MetricsPathLabel: "/metrics",
@ -365,7 +370,6 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second), ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second),
}, },
noDefaultPort: true,
res: labels.FromMap(map[string]string{ res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4", model.AddressLabel: "1.2.3.4",
model.InstanceLabel: "1.2.3.4", model.InstanceLabel: "1.2.3.4",
@ -384,7 +388,7 @@ func TestPopulateLabels(t *testing.T) {
model.ScrapeTimeoutLabel: "1s", model.ScrapeTimeoutLabel: "1s",
}), }),
}, },
// Remove default port (http). // Verify that the default port is not removed (http).
{ {
in: labels.FromMap(map[string]string{ in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:80", model.AddressLabel: "1.2.3.4:80",
@ -396,9 +400,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second), ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second),
}, },
noDefaultPort: true,
res: labels.FromMap(map[string]string{ res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4", model.AddressLabel: "1.2.3.4:80",
model.InstanceLabel: "1.2.3.4:80", model.InstanceLabel: "1.2.3.4:80",
model.SchemeLabel: "http", model.SchemeLabel: "http",
model.MetricsPathLabel: "/metrics", model.MetricsPathLabel: "/metrics",
@ -415,7 +418,7 @@ func TestPopulateLabels(t *testing.T) {
model.ScrapeTimeoutLabel: "1s", model.ScrapeTimeoutLabel: "1s",
}), }),
}, },
// Remove default port (https). // Verify that the default port is not removed (https).
{ {
in: labels.FromMap(map[string]string{ in: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4:443", model.AddressLabel: "1.2.3.4:443",
@ -427,9 +430,8 @@ func TestPopulateLabels(t *testing.T) {
ScrapeInterval: model.Duration(time.Second), ScrapeInterval: model.Duration(time.Second),
ScrapeTimeout: model.Duration(time.Second), ScrapeTimeout: model.Duration(time.Second),
}, },
noDefaultPort: true,
res: labels.FromMap(map[string]string{ res: labels.FromMap(map[string]string{
model.AddressLabel: "1.2.3.4", model.AddressLabel: "1.2.3.4:443",
model.InstanceLabel: "1.2.3.4:443", model.InstanceLabel: "1.2.3.4:443",
model.SchemeLabel: "https", model.SchemeLabel: "https",
model.MetricsPathLabel: "/metrics", model.MetricsPathLabel: "/metrics",
@ -450,7 +452,7 @@ func TestPopulateLabels(t *testing.T) {
for _, c := range cases { for _, c := range cases {
in := c.in.Copy() in := c.in.Copy()
res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg, c.noDefaultPort) res, orig, err := PopulateLabels(labels.NewBuilder(c.in), c.cfg)
if c.err != "" { if c.err != "" {
require.EqualError(t, err, c.err) require.EqualError(t, err, c.err)
} else { } else {
@ -721,37 +723,256 @@ scrape_configs:
require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools()) require.ElementsMatch(t, []string{"job1", "job3"}, scrapeManager.ScrapePools())
} }
// TestManagerCTZeroIngestion tests scrape manager for CT cases. func setupScrapeManager(t *testing.T, honorTimestamps, enableCTZeroIngestion bool) (*collectResultAppender, *Manager) {
app := &collectResultAppender{}
scrapeManager, err := NewManager(
&Options{
EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion,
skipOffsetting: true,
},
log.NewLogfmtLogger(os.Stderr),
nil,
&collectResultAppendable{app},
prometheus.NewRegistry(),
)
require.NoError(t, err)
require.NoError(t, scrapeManager.ApplyConfig(&config.Config{
GlobalConfig: config.GlobalConfig{
// Disable regular scrapes.
ScrapeInterval: model.Duration(9999 * time.Minute),
ScrapeTimeout: model.Duration(5 * time.Second),
ScrapeProtocols: []config.ScrapeProtocol{config.OpenMetricsText1_0_0, config.PrometheusProto},
},
ScrapeConfigs: []*config.ScrapeConfig{{JobName: "test", HonorTimestamps: honorTimestamps}},
}))
return app, scrapeManager
}
func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server {
once := sync.Once{}
server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fail := true
once.Do(func() {
fail = false
w.Header().Set("Content-Type", typ)
w.Write(toWrite)
})
if fail {
w.WriteHeader(http.StatusInternalServerError)
}
}),
)
t.Cleanup(func() { server.Close() })
return server
}
// TestManagerCTZeroIngestion tests scrape manager for various CT cases.
func TestManagerCTZeroIngestion(t *testing.T) { func TestManagerCTZeroIngestion(t *testing.T) {
const mName = "expected_counter" const (
// The _total suffix is required; otherwise expfmt with OMText will mark the metric as "unknown".
expectedMetricName = "expected_metric_total"
expectedCreatedMetricName = "expected_metric_created"
expectedSampleValue = 17.0
)
for _, testFormat := range []config.ScrapeProtocol{config.PrometheusProto, config.OpenMetricsText1_0_0} {
t.Run(fmt.Sprintf("format=%s", testFormat), func(t *testing.T) {
for _, testWithCT := range []bool{false, true} {
t.Run(fmt.Sprintf("withCT=%v", testWithCT), func(t *testing.T) {
for _, testCTZeroIngest := range []bool{false, true} {
t.Run(fmt.Sprintf("ctZeroIngest=%v", testCTZeroIngest), func(t *testing.T) {
sampleTs := time.Now()
ctTs := time.Time{}
if testWithCT {
ctTs = sampleTs.Add(-2 * time.Minute)
}
// TODO(bwplotka): Add more types than just counter?
encoded := prepareTestEncodedCounter(t, testFormat, expectedMetricName, expectedSampleValue, sampleTs, ctTs)
app, scrapeManager := setupScrapeManager(t, true, testCTZeroIngest)
// Perform the test.
doOneScrape(t, scrapeManager, app, setupTestServer(t, config.ScrapeProtocolsHeaders[testFormat], encoded))
// Verify what we got vs expectations around CT injection.
samples := findSamplesForMetric(app.resultFloats, expectedMetricName)
if testWithCT && testCTZeroIngest {
require.Len(t, samples, 2)
require.Equal(t, 0.0, samples[0].f)
require.Equal(t, timestamp.FromTime(ctTs), samples[0].t)
require.Equal(t, expectedSampleValue, samples[1].f)
require.Equal(t, timestamp.FromTime(sampleTs), samples[1].t)
} else {
require.Len(t, samples, 1)
require.Equal(t, expectedSampleValue, samples[0].f)
require.Equal(t, timestamp.FromTime(sampleTs), samples[0].t)
}
// Verify what we got vs expectations around the additional _created series for OM text.
// Enabling CT zero ingestion also suppresses that _created line.
createdSeriesSamples := findSamplesForMetric(app.resultFloats, expectedCreatedMetricName)
if testFormat == config.OpenMetricsText1_0_0 && testWithCT && !testCTZeroIngest {
// For OM text, when the counter has a CT and the feature flag is disabled, we should see _created lines.
require.Len(t, createdSeriesSamples, 1)
// Conversion taken from common/expfmt.writeOpenMetricsFloat.
// We don't check the CT timestamp, as explicit timestamps were not implemented in expfmt.Encoder,
// though OM supports them: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#:~:text=An%20example%20with%20a%20Metric%20with%20no%20labels%2C%20and%20a%20MetricPoint%20with%20a%20timestamp%20and%20a%20created
// We could implement this, but we may get rid of OM 1.0 CT lines entirely.
require.Equal(t, float64(timestamppb.New(ctTs).AsTime().UnixNano())/1e9, createdSeriesSamples[0].f)
} else {
require.Empty(t, createdSeriesSamples)
}
})
}
})
}
})
}
}
func prepareTestEncodedCounter(t *testing.T, format config.ScrapeProtocol, mName string, v float64, ts, ct time.Time) (encoded []byte) {
t.Helper()
counter := &dto.Counter{Value: proto.Float64(v)}
if !ct.IsZero() {
counter.CreatedTimestamp = timestamppb.New(ct)
}
ctrType := dto.MetricType_COUNTER
inputMetric := &dto.MetricFamily{
Name: proto.String(mName),
Type: &ctrType,
Metric: []*dto.Metric{{
TimestampMs: proto.Int64(timestamp.FromTime(ts)),
Counter: counter,
}},
}
switch format {
case config.PrometheusProto:
return protoMarshalDelimited(t, inputMetric)
case config.OpenMetricsText1_0_0:
buf := &bytes.Buffer{}
require.NoError(t, expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeOpenMetrics), expfmt.WithCreatedLines(), expfmt.WithUnit()).Encode(inputMetric))
_, _ = buf.WriteString("# EOF")
t.Log("produced OM text to expose:", buf.String())
return buf.Bytes()
default:
t.Fatalf("not implemented format: %v", format)
return nil
}
}
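For orientation, the OM text produced for a counter with a CT looks roughly like the following; the values and timestamps are illustrative and the exact float formatting is expfmt's:
# TYPE expected_metric counter
expected_metric_total 17.0 1728288000.000
expected_metric_created 1.72828788e+09
# EOF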
func doOneScrape(t *testing.T, manager *Manager, appender *collectResultAppender, server *httptest.Server) {
t.Helper()
serverURL, err := url.Parse(server.URL)
require.NoError(t, err)
// Add a fake target directly into tsets, then reload.
manager.updateTsets(map[string][]*targetgroup.Group{
"test": {{
Targets: []model.LabelSet{{
model.SchemeLabel: model.LabelValue(serverURL.Scheme),
model.AddressLabel: model.LabelValue(serverURL.Host),
}},
}},
})
manager.reload()
// Wait for one scrape.
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
require.NoError(t, runutil.Retry(100*time.Millisecond, ctx.Done(), func() error {
appender.mtx.Lock()
defer appender.mtx.Unlock()
// Check if scrape happened and grab the relevant samples.
if len(appender.resultFloats) > 0 {
return nil
}
return fmt.Errorf("expected some float samples, got none")
}), "after 1 minute")
manager.Stop()
}
func findSamplesForMetric(floats []floatSample, metricName string) (ret []floatSample) {
for _, f := range floats {
if f.metric.Get(model.MetricNameLabel) == metricName {
ret = append(ret, f)
}
}
return ret
}
// generateTestHistogram generates the same thing as tsdbutil.GenerateTestHistogram,
// but in the form of dto.Histogram.
func generateTestHistogram(i int) *dto.Histogram {
helper := tsdbutil.GenerateTestHistogram(i)
h := &dto.Histogram{}
h.SampleCount = proto.Uint64(helper.Count)
h.SampleSum = proto.Float64(helper.Sum)
h.Schema = proto.Int32(helper.Schema)
h.ZeroThreshold = proto.Float64(helper.ZeroThreshold)
h.ZeroCount = proto.Uint64(helper.ZeroCount)
h.PositiveSpan = make([]*dto.BucketSpan, len(helper.PositiveSpans))
for i, span := range helper.PositiveSpans {
h.PositiveSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(span.Offset),
Length: proto.Uint32(span.Length),
}
}
h.PositiveDelta = helper.PositiveBuckets
h.NegativeSpan = make([]*dto.BucketSpan, len(helper.NegativeSpans))
for i, span := range helper.NegativeSpans {
h.NegativeSpan[i] = &dto.BucketSpan{
Offset: proto.Int32(span.Offset),
Length: proto.Uint32(span.Length),
}
}
h.NegativeDelta = helper.NegativeBuckets
return h
}
func TestManagerCTZeroIngestionHistogram(t *testing.T) {
const mName = "expected_histogram"
for _, tc := range []struct { for _, tc := range []struct {
name string name string
counterSample *dto.Counter inputHistSample *dto.Histogram
enableCTZeroIngestion bool enableCTZeroIngestion bool
}{ }{
{ {
name: "disabled with CT on counter", name: "disabled with CT on histogram",
counterSample: &dto.Counter{ inputHistSample: func() *dto.Histogram {
Value: proto.Float64(1.0), h := generateTestHistogram(0)
// Timestamp does not matter as long as it exists in this test. h.CreatedTimestamp = timestamppb.Now()
CreatedTimestamp: timestamppb.Now(), return h
}, }(),
enableCTZeroIngestion: false,
}, },
{ {
name: "enabled with CT on counter", name: "enabled with CT on histogram",
counterSample: &dto.Counter{ inputHistSample: func() *dto.Histogram {
Value: proto.Float64(1.0), h := generateTestHistogram(0)
// Timestamp does not matter as long as it exists in this test. h.CreatedTimestamp = timestamppb.Now()
CreatedTimestamp: timestamppb.Now(), return h
}, }(),
enableCTZeroIngestion: true, enableCTZeroIngestion: true,
}, },
{ {
name: "enabled without CT on counter", name: "enabled without CT on histogram",
counterSample: &dto.Counter{ inputHistSample: func() *dto.Histogram {
Value: proto.Float64(1.0), h := generateTestHistogram(0)
}, return h
}(),
enableCTZeroIngestion: true, enableCTZeroIngestion: true,
}, },
} { } {
@ -760,6 +981,7 @@ func TestManagerCTZeroIngestion(t *testing.T) {
scrapeManager, err := NewManager( scrapeManager, err := NewManager(
&Options{ &Options{
EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion, EnableCreatedTimestampZeroIngestion: tc.enableCTZeroIngestion,
EnableNativeHistogramsIngestion: true,
skipOffsetting: true, skipOffsetting: true,
}, },
log.NewLogfmtLogger(os.Stderr), log.NewLogfmtLogger(os.Stderr),
@ -785,16 +1007,16 @@ func TestManagerCTZeroIngestion(t *testing.T) {
// Start a fake HTTP target that allows only one scrape. // Start a fake HTTP target that allows only one scrape.
server := httptest.NewServer( server := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fail := true fail := true // TODO(bwplotka): Kill or use?
once.Do(func() { once.Do(func() {
fail = false fail = false
w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`) w.Header().Set("Content-Type", `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`)
ctrType := dto.MetricType_COUNTER ctrType := dto.MetricType_HISTOGRAM
w.Write(protoMarshalDelimited(t, &dto.MetricFamily{ w.Write(protoMarshalDelimited(t, &dto.MetricFamily{
Name: proto.String(mName), Name: proto.String(mName),
Type: &ctrType, Type: &ctrType,
Metric: []*dto.Metric{{Counter: tc.counterSample}}, Metric: []*dto.Metric{{Histogram: tc.inputHistSample}},
})) }))
}) })
@ -820,7 +1042,8 @@ func TestManagerCTZeroIngestion(t *testing.T) {
}) })
scrapeManager.reload() scrapeManager.reload()
var got []float64 var got []histogramSample
// Wait for one scrape. // Wait for one scrape.
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel() defer cancel()
@ -828,32 +1051,35 @@ func TestManagerCTZeroIngestion(t *testing.T) {
app.mtx.Lock() app.mtx.Lock()
defer app.mtx.Unlock() defer app.mtx.Unlock()
// Check if scrape happened and grab the relevant samples, they have to be there - or it's a bug // Check if the scrape happened and grab the relevant histograms; they have to be there, or it's a bug
// and it's not worth waiting. // and it's not worth waiting.
for _, f := range app.resultFloats { for _, h := range app.resultHistograms {
if f.metric.Get(model.MetricNameLabel) == mName { if h.metric.Get(model.MetricNameLabel) == mName {
got = append(got, f.f) got = append(got, h)
} }
} }
if len(app.resultFloats) > 0 { if len(app.resultHistograms) > 0 {
return nil return nil
} }
return fmt.Errorf("expected some samples, got none") return fmt.Errorf("expected some histogram samples, got none")
}), "after 1 minute") }), "after 1 minute")
scrapeManager.Stop() scrapeManager.Stop()
// Check for zero samples, assuming we only injected always one sample. // Check for zero samples, assuming we always injected exactly one histogram sample.
// Did it contain CT to inject? If yes, was CT zero enabled? // Did it contain CT to inject? If yes, was CT zero enabled?
if tc.counterSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion { if tc.inputHistSample.CreatedTimestamp.IsValid() && tc.enableCTZeroIngestion {
require.Len(t, got, 2) require.Len(t, got, 2)
require.Equal(t, 0.0, got[0]) // Zero sample.
require.Equal(t, tc.counterSample.GetValue(), got[1]) require.Equal(t, histogram.Histogram{}, *got[0].h)
// Quick soft check to make sure it's the same sample or at least not zero.
require.Equal(t, tc.inputHistSample.GetSampleSum(), got[1].h.Sum)
return return
} }
// Expect only one valid sample. // Expect only one valid sample.
require.Len(t, got, 1) require.Len(t, got, 1)
require.Equal(t, tc.counterSample.GetValue(), got[0]) // Quick soft check to make sure it's the same sample or at least not zero.
require.Equal(t, tc.inputHistSample.GetSampleSum(), got[0].h.Sum)
}) })
} }
} }

View file

@ -87,8 +87,6 @@ type scrapePool struct {
// Constructor for new scrape loops. This is settable for testing convenience. // Constructor for new scrape loops. This is settable for testing convenience.
newLoop func(scrapeLoopOptions) loop newLoop func(scrapeLoopOptions) loop
noDefaultPort bool
metrics *scrapeMetrics metrics *scrapeMetrics
scrapeFailureLogger log.Logger scrapeFailureLogger log.Logger
@ -150,7 +148,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed
logger: logger, logger: logger,
metrics: metrics, metrics: metrics,
httpOpts: options.HTTPClientOptions, httpOpts: options.HTTPClientOptions,
noDefaultPort: options.NoDefaultPort,
} }
sp.newLoop = func(opts scrapeLoopOptions) loop { sp.newLoop = func(opts scrapeLoopOptions) loop {
// Update the targets retrieval function for metadata to a new scrape cache. // Update the targets retrieval function for metadata to a new scrape cache.
@ -431,7 +428,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) {
sp.droppedTargets = []*Target{} sp.droppedTargets = []*Target{}
sp.droppedTargetsCount = 0 sp.droppedTargetsCount = 0
for _, tg := range tgs { for _, tg := range tgs {
targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) targets, failures := TargetsFromGroup(tg, sp.config, targets, lb)
for _, err := range failures { for _, err := range failures {
level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) level.Error(sp.logger).Log("msg", "Creating target failed", "err", err)
} }
@ -1546,7 +1543,7 @@ type appendErrors struct {
} }
func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) {
p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.enableCTZeroIngestion, sl.symbolTable)
if sl.convertClassicHistograms { if sl.convertClassicHistograms {
p = textparse.NewNHCBParser(p, sl.scrapeClassicHistograms) p = textparse.NewNHCBParser(p, sl.scrapeClassicHistograms)
} }
@ -1711,7 +1708,15 @@ loop:
} else { } else {
if sl.enableCTZeroIngestion { if sl.enableCTZeroIngestion {
if ctMs := p.CreatedTimestamp(); ctMs != nil { if ctMs := p.CreatedTimestamp(); ctMs != nil {
if isHistogram && sl.enableNativeHistogramIngestion {
if h != nil {
ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil)
} else {
ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh)
}
} else {
ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
}
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
// CT is an experimental feature. For now, we don't need to fail the // CT is an experimental feature. For now, we don't need to fail the
// scrape on errors updating the created timestamp, log debug. // scrape on errors updating the created timestamp, log debug.

View file

@ -442,7 +442,7 @@ func TestScrapePoolTargetLimit(t *testing.T) {
lerr := l.(*testLoop).getForcedError() lerr := l.(*testLoop).getForcedError()
if shouldErr { if shouldErr {
require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit) require.Error(t, lerr, "error was expected for %d targets with a limit of %d", targets, limit)
require.Equal(t, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit), lerr.Error()) require.EqualError(t, lerr, fmt.Sprintf("target_limit exceeded (number of targets: %d, limit: %d)", targets, limit))
} else { } else {
require.NoError(t, lerr) require.NoError(t, lerr)
} }
@ -1529,7 +1529,7 @@ func TestScrapeLoopAppendCacheEntryButErrNotFound(t *testing.T) {
fakeRef := storage.SeriesRef(1) fakeRef := storage.SeriesRef(1)
expValue := float64(1) expValue := float64(1)
metric := []byte(`metric{n="1"} 1`) metric := []byte(`metric{n="1"} 1`)
p, warning := textparse.New(metric, "", false, labels.NewSymbolTable()) p, warning := textparse.New(metric, "", false, false, labels.NewSymbolTable())
require.NoError(t, warning) require.NoError(t, warning)
var lset labels.Labels var lset labels.Labels
@ -2004,6 +2004,7 @@ metric: <
contentType: "application/vnd.google.protobuf", contentType: "application/vnd.google.protobuf",
histograms: []histogramSample{{ histograms: []histogramSample{{
t: 1234568, t: 1234568,
metric: labels.FromStrings("__name__", "test_histogram"),
h: &histogram.Histogram{ h: &histogram.Histogram{
Count: 175, Count: 175,
ZeroCount: 2, ZeroCount: 2,
@ -2130,6 +2131,7 @@ metric: <
}, },
histograms: []histogramSample{{ histograms: []histogramSample{{
t: 1234568, t: 1234568,
metric: labels.FromStrings("__name__", "test_histogram"),
h: &histogram.Histogram{ h: &histogram.Histogram{
Count: 175, Count: 175,
ZeroCount: 2, ZeroCount: 2,
@ -2551,7 +2553,7 @@ func TestTargetScrapeScrapeNotFound(t *testing.T) {
resp, err := ts.scrape(context.Background()) resp, err := ts.scrape(context.Background())
require.NoError(t, err) require.NoError(t, err)
_, err = ts.readResponse(context.Background(), resp, io.Discard) _, err = ts.readResponse(context.Background(), resp, io.Discard)
require.Contains(t, err.Error(), "404", "Expected \"404 NotFound\" error but got: %s", err) require.ErrorContains(t, err, "404", "Expected \"404 NotFound\" error but got: %s", err)
} }
func TestTargetScraperBodySizeLimit(t *testing.T) { func TestTargetScraperBodySizeLimit(t *testing.T) {
@ -4299,7 +4301,9 @@ func TestNativeHistogramMaxSchemaSet(t *testing.T) {
}, },
} }
for name, tc := range testcases { for name, tc := range testcases {
tc := tc
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
t.Parallel()
testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema) testNativeHistogramMaxSchemaSet(t, tc.minBucketFactor, tc.expectedSchema)
}) })
} }
@ -4342,8 +4346,8 @@ func testNativeHistogramMaxSchemaSet(t *testing.T, minBucketFactor string, expec
configStr := fmt.Sprintf(` configStr := fmt.Sprintf(`
global: global:
metric_name_validation_scheme: legacy metric_name_validation_scheme: legacy
scrape_interval: 1s scrape_interval: 50ms
scrape_timeout: 1s scrape_timeout: 25ms
scrape_configs: scrape_configs:
- job_name: test - job_name: test
%s %s
@ -4356,7 +4360,7 @@ scrape_configs:
s.DB.EnableNativeHistograms() s.DB.EnableNativeHistograms()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
mng, err := NewManager(&Options{EnableNativeHistogramsIngestion: true}, nil, nil, s, reg) mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg)
require.NoError(t, err) require.NoError(t, err)
cfg, err := config.Load(configStr, false, log.NewNopLogger()) cfg, err := config.Load(configStr, false, log.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
@ -4387,7 +4391,7 @@ scrape_configs:
countSeries++ countSeries++
} }
return countSeries > 0 return countSeries > 0
}, 15*time.Second, 100*time.Millisecond) }, 5*time.Second, 100*time.Millisecond)
// Check that native histogram schema is as expected. // Check that native histogram schema is as expected.
q, err := s.Querier(0, math.MaxInt64) q, err := s.Querier(0, math.MaxInt64)

View file

@ -17,7 +17,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"hash/fnv" "hash/fnv"
"net"
"net/url" "net/url"
"strings" "strings"
"sync" "sync"
@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels
// PopulateLabels builds a label set from the given label set and scrape configuration. // PopulateLabels builds a label set from the given label set and scrape configuration.
// It returns a label set before relabeling was applied as the second return value. // It returns a label set before relabeling was applied as the second return value.
// Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling.
func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) {
// Copy labels into the labelset for the target if they are not set already. // Copy labels into the labelset for the target if they are not set already.
scrapeLabels := []labels.Label{ scrapeLabels := []labels.Label{
{Name: model.JobLabel, Value: cfg.JobName}, {Name: model.JobLabel, Value: cfg.JobName},
@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address")
} }
// addPort checks whether we should add a default port to the address.
// If the address is not valid, we don't append a port either.
addPort := func(s string) (string, string, bool) {
// If we can split, a port exists and we don't have to add one.
if host, port, err := net.SplitHostPort(s); err == nil {
return host, port, false
}
// If adding a port makes it valid, the previous error
// was not due to an invalid address and we can append a port.
_, _, err := net.SplitHostPort(s + ":1234")
return "", "", err == nil
}
addr := lb.Get(model.AddressLabel) addr := lb.Get(model.AddressLabel)
scheme := lb.Get(model.SchemeLabel)
host, port, add := addPort(addr)
// If it's an address with no trailing port, infer it based on the used scheme
// unless the no-default-scrape-port feature flag is present.
if !noDefaultPort && add {
// Addresses reaching this point are already wrapped in [] if necessary.
switch scheme {
case "http", "":
addr += ":80"
case "https":
addr += ":443"
default:
return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme)
}
lb.Set(model.AddressLabel, addr)
}
if noDefaultPort {
// If it's an address with a trailing default port and the
// no-default-scrape-port flag is present, remove the port.
switch port {
case "80":
if scheme == "http" {
lb.Set(model.AddressLabel, host)
}
case "443":
if scheme == "https" {
lb.Set(model.AddressLabel, host)
}
}
}
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return labels.EmptyLabels(), labels.EmptyLabels(), err return labels.EmptyLabels(), labels.EmptyLabels(), err
@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort
} }
// TargetsFromGroup builds targets based on the given TargetGroup and config. // TargetsFromGroup builds targets based on the given TargetGroup and config.
func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) {
targets = targets[:0] targets = targets[:0]
failures := []error{} failures := []error{}
@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault
} }
} }
lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) lset, origLabels, err := PopulateLabels(lb, cfg)
if err != nil { if err != nil {
failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err))
} }
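With the default-port logic removed, PopulateLabels takes the discovered address verbatim in both directions: nothing is appended to a portless address and no default port is stripped. A hedged sketch of the resulting behavior, assuming cfg is any valid *config.ScrapeConfig:
in := labels.FromMap(map[string]string{model.AddressLabel: "1.2.3.4:80"})
res, _, err := PopulateLabels(labels.NewBuilder(in), cfg)
// err == nil; res carries __address__="1.2.3.4:80" and instance="1.2.3.4:80".
// A portless "1.2.3.4" would likewise pass through unchanged.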

View file

@ -348,7 +348,7 @@ func TestTargetsFromGroup(t *testing.T) {
ScrapeInterval: model.Duration(1 * time.Minute), ScrapeInterval: model.Duration(1 * time.Minute),
} }
lb := labels.NewBuilder(labels.EmptyLabels()) lb := labels.NewBuilder(labels.EmptyLabels())
targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, false, nil, lb) targets, failures := TargetsFromGroup(&targetgroup.Group{Targets: []model.LabelSet{{}, {model.AddressLabel: "localhost:9090"}}}, &cfg, nil, lb)
require.Len(t, targets, 1) require.Len(t, targets, 1)
require.Len(t, failures, 1) require.Len(t, failures, 1)
require.EqualError(t, failures[0], expectedError) require.EqualError(t, failures[0], expectedError)
@ -435,7 +435,7 @@ scrape_configs:
lb := labels.NewBuilder(labels.EmptyLabels()) lb := labels.NewBuilder(labels.EmptyLabels())
group := &targetgroup.Group{Targets: targets} group := &targetgroup.Group{Targets: targets}
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], false, tgets, lb) tgets, _ = TargetsFromGroup(group, config.ScrapeConfigs[0], tgets, lb)
if len(targets) != nTargets { if len(targets) != nTargets {
b.Fatalf("Expected %d targets, got %d", nTargets, len(targets)) b.Fatalf("Expected %d targets, got %d", nTargets, len(targets))
} }

View file

@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Install Go - name: Install Go
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with: with:

View file

@ -187,6 +187,10 @@ func (s fSample) Type() chunkenc.ValueType {
return chunkenc.ValFloat return chunkenc.ValFloat
} }
func (s fSample) Copy() chunks.Sample {
return s
}
type hSample struct { type hSample struct {
t int64 t int64
h *histogram.Histogram h *histogram.Histogram
@ -212,6 +216,10 @@ func (s hSample) Type() chunkenc.ValueType {
return chunkenc.ValHistogram return chunkenc.ValHistogram
} }
func (s hSample) Copy() chunks.Sample {
return hSample{t: s.t, h: s.h.Copy()}
}
type fhSample struct { type fhSample struct {
t int64 t int64
fh *histogram.FloatHistogram fh *histogram.FloatHistogram
@ -237,6 +245,10 @@ func (s fhSample) Type() chunkenc.ValueType {
return chunkenc.ValFloatHistogram return chunkenc.ValFloatHistogram
} }
func (s fhSample) Copy() chunks.Sample {
return fhSample{t: s.t, fh: s.fh.Copy()}
}
type sampleRing struct { type sampleRing struct {
delta int64 delta int64
@ -535,55 +547,8 @@ func (r *sampleRing) addFH(s fhSample) {
} }
} }
// genericAdd is a generic implementation of adding a chunks.Sample // addSample adds a sample to a buffer of chunks.Sample, i.e. the general case
// implementation to a buffer of a sample ring. However, the Go compiler // using an interface as the type.
// currently (go1.20) decides to not expand the code during compile time, but
// creates dynamic code to handle the different types. That has a significant
// overhead during runtime, noticeable in PromQL benchmarks. For example, the
// "RangeQuery/expr=rate(a_hundred[1d]),steps=.*" benchmarks show about 7%
// longer runtime, 9% higher allocation size, and 10% more allocations.
// Therefore, genericAdd has been manually implemented for all the types
// (addSample, addF, addH, addFH) below.
//
// func genericAdd[T chunks.Sample](s T, buf []T, r *sampleRing) []T {
// l := len(buf)
// // Grow the ring buffer if it fits no more elements.
// if l == 0 {
// buf = make([]T, 16)
// l = 16
// }
// if l == r.l {
// newBuf := make([]T, 2*l)
// copy(newBuf[l+r.f:], buf[r.f:])
// copy(newBuf, buf[:r.f])
//
// buf = newBuf
// r.i = r.f
// r.f += l
// l = 2 * l
// } else {
// r.i++
// if r.i >= l {
// r.i -= l
// }
// }
//
// buf[r.i] = s
// r.l++
//
// // Free head of the buffer of samples that just fell out of the range.
// tmin := s.T() - r.delta
// for buf[r.f].T() < tmin {
// r.f++
// if r.f >= l {
// r.f -= l
// }
// r.l--
// }
// return buf
// }
// addSample is a handcoded specialization of genericAdd (see above).
func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample { func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sample {
l := len(buf) l := len(buf)
// Grow the ring buffer if it fits no more elements. // Grow the ring buffer if it fits no more elements.
@ -607,7 +572,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam
} }
} }
buf[r.i] = s buf[r.i] = s.Copy()
r.l++ r.l++
// Free head of the buffer of samples that just fell out of the range. // Free head of the buffer of samples that just fell out of the range.
@ -622,7 +587,7 @@ func addSample(s chunks.Sample, buf []chunks.Sample, r *sampleRing) []chunks.Sam
return buf return buf
} }
// addF is a handcoded specialization of genericAdd (see above). // addF adds an fSample to a (specialized) fSample buffer.
func addF(s fSample, buf []fSample, r *sampleRing) []fSample { func addF(s fSample, buf []fSample, r *sampleRing) []fSample {
l := len(buf) l := len(buf)
// Grow the ring buffer if it fits no more elements. // Grow the ring buffer if it fits no more elements.
@ -661,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample {
return buf return buf
} }
// addH is a handcoded specialization of genericAdd (see above). // addH adds an hSample to a (specialized) hSample buffer.
func addH(s hSample, buf []hSample, r *sampleRing) []hSample { func addH(s hSample, buf []hSample, r *sampleRing) []hSample {
l := len(buf) l := len(buf)
// Grow the ring buffer if it fits no more elements. // Grow the ring buffer if it fits no more elements.
@ -705,7 +670,7 @@ func addH(s hSample, buf []hSample, r *sampleRing) []hSample {
return buf return buf
} }
// addFH is a handcoded specialization of genericAdd (see above). // addFH adds an fhSample to a (specialized) fhSample buffer.
func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample { func addFH(s fhSample, buf []fhSample, r *sampleRing) []fhSample {
l := len(buf) l := len(buf)
// Grow the ring buffer if it fits no more elements. // Grow the ring buffer if it fits no more elements.

View file

@ -314,6 +314,56 @@ func TestBufferedSeriesIteratorMixedHistograms(t *testing.T) {
require.Equal(t, histograms[1].ToFloat(nil), fh) require.Equal(t, histograms[1].ToFloat(nil), fh)
} }
func TestBufferedSeriesIteratorMixedFloatsAndHistograms(t *testing.T) {
histograms := tsdbutil.GenerateTestHistograms(5)
it := NewBufferIterator(NewListSeriesIteratorWithCopy(samples{
hSample{t: 1, h: histograms[0].Copy()},
fSample{t: 2, f: 2},
hSample{t: 3, h: histograms[1].Copy()},
hSample{t: 4, h: histograms[2].Copy()},
fhSample{t: 3, fh: histograms[3].ToFloat(nil)},
fhSample{t: 4, fh: histograms[4].ToFloat(nil)},
}), 6)
require.Equal(t, chunkenc.ValNone, it.Seek(7))
require.NoError(t, it.Err())
buf := it.Buffer()
require.Equal(t, chunkenc.ValHistogram, buf.Next())
_, h0 := buf.AtHistogram()
require.Equal(t, histograms[0], h0)
require.Equal(t, chunkenc.ValFloat, buf.Next())
_, v := buf.At()
require.Equal(t, 2.0, v)
require.Equal(t, chunkenc.ValHistogram, buf.Next())
_, h1 := buf.AtHistogram()
require.Equal(t, histograms[1], h1)
require.Equal(t, chunkenc.ValHistogram, buf.Next())
_, h2 := buf.AtHistogram()
require.Equal(t, histograms[2], h2)
require.Equal(t, chunkenc.ValFloatHistogram, buf.Next())
_, h3 := buf.AtFloatHistogram(nil)
require.Equal(t, histograms[3].ToFloat(nil), h3)
require.Equal(t, chunkenc.ValFloatHistogram, buf.Next())
_, h4 := buf.AtFloatHistogram(nil)
require.Equal(t, histograms[4].ToFloat(nil), h4)
// Test for overwrite bug where the buffered histogram was reused
// between items in the buffer.
require.Equal(t, histograms[0], h0)
require.Equal(t, histograms[1], h1)
require.Equal(t, histograms[2], h2)
require.Equal(t, histograms[3].ToFloat(nil), h3)
require.Equal(t, histograms[4].ToFloat(nil), h4)
}
func BenchmarkBufferedSeriesIterator(b *testing.B) { func BenchmarkBufferedSeriesIterator(b *testing.B) {
// Simulate a 5 minute rate. // Simulate a 5 minute rate.
it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60)

View file

@ -190,6 +190,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64
return ref, nil return ref, nil
} }
func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) {
ref, err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh)
if err != nil {
return ref, err
}
for _, appender := range f.secondaries {
if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil {
return 0, err
}
}
return ref, nil
}
func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) {
ref, err := f.primary.UpdateMetadata(ref, l, m) ref, err := f.primary.UpdateMetadata(ref, l, m)
if err != nil { if err != nil {

View file

@ -173,16 +173,13 @@ func TestFanoutErrors(t *testing.T) {
} }
if tc.err != nil { if tc.err != nil {
require.Error(t, ss.Err()) require.EqualError(t, ss.Err(), tc.err.Error())
require.Equal(t, tc.err.Error(), ss.Err().Error())
} }
if tc.warning != nil { if tc.warning != nil {
require.NotEmpty(t, ss.Warnings(), "warnings expected")
w := ss.Warnings() w := ss.Warnings()
require.Error(t, w.AsErrors()[0]) require.NotEmpty(t, w, "warnings expected")
warn, _ := w.AsStrings("", 0, 0) require.EqualError(t, w.AsErrors()[0], tc.warning.Error())
require.Equal(t, tc.warning.Error(), warn[0])
} }
}) })
t.Run("chunks", func(t *testing.T) { t.Run("chunks", func(t *testing.T) {
@ -200,16 +197,13 @@ func TestFanoutErrors(t *testing.T) {
} }
if tc.err != nil { if tc.err != nil {
require.Error(t, ss.Err()) require.EqualError(t, ss.Err(), tc.err.Error())
require.Equal(t, tc.err.Error(), ss.Err().Error())
} }
if tc.warning != nil { if tc.warning != nil {
require.NotEmpty(t, ss.Warnings(), "warnings expected")
w := ss.Warnings() w := ss.Warnings()
require.Error(t, w.AsErrors()[0]) require.NotEmpty(t, w, "warnings expected")
warn, _ := w.AsStrings("", 0, 0) require.EqualError(t, w.AsErrors()[0], tc.warning.Error())
require.Equal(t, tc.warning.Error(), warn[0])
} }
}) })
} }

View file

@ -51,6 +51,7 @@ var (
// behaviour, and we currently don't have a way to determine this. As a result // behaviour, and we currently don't have a way to determine this. As a result
// it's recommended to ignore this error for now. // it's recommended to ignore this error for now.
ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring")
ErrCTNewerThanSample = fmt.Errorf("CT is newer than or the same as the sample's timestamp, ignoring")
) )
// SeriesRef is a generic series reference. In prometheus it is either a // SeriesRef is a generic series reference. In prometheus it is either a
@ -313,6 +314,20 @@ type HistogramAppender interface {
// pointer. AppendHistogram won't mutate the histogram, but in turn // pointer. AppendHistogram won't mutate the histogram, but in turn
// depends on the caller to not mutate it either. // depends on the caller to not mutate it either.
AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
// AppendHistogramCTZeroSample adds a synthetic zero sample at the given ct timestamp,
// which will be associated with the given series, labels, and the incoming
// sample's t (timestamp). AppendHistogramCTZeroSample returns an error if the zero
// sample can't be appended, for example when ct is too old or when it would
// collide with the incoming sample (the sample has priority).
//
// AppendHistogramCTZeroSample has to be called before the corresponding AppendHistogram call.
// A series reference number is returned which can be used to modify the
// CT for the given series in the same or later transactions.
// Returned reference numbers are ephemeral and may be rejected in calls
// to AppendHistogramCTZeroSample() at any point.
//
// If the reference is 0 it must not be used for caching.
AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error)
} }
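A hedged usage sketch of the contract above, mirroring how the scrape loop applies it (app, lset, t, ct, and h are caller-supplied; the zero sample must precede the real append, and out-of-order CT is tolerated):
// appendWithCT appends h at time t, seeding a synthetic zero sample at ct first.
func appendWithCT(app storage.Appender, lset labels.Labels, t, ct int64, h *histogram.Histogram) error {
	ref, err := app.AppendHistogramCTZeroSample(0, lset, t, ct, h, nil)
	if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
		return err // only real failures; out-of-order CT is common and ignored
	}
	_, err = app.AppendHistogram(ref, lset, t, h, nil)
	return err
}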
// MetadataUpdater provides an interface for associating metadata to stored series. // MetadataUpdater provides an interface for associating metadata to stored series.

View file

@ -86,7 +86,7 @@ func TestChunkedReader_Overflow(t *testing.T) {
_, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next() _, err = NewChunkedReader(bytes.NewReader(b2), 11, nil).Next()
require.Error(t, err, "expect exceed limit error") require.Error(t, err, "expect exceed limit error")
require.Equal(t, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes", err.Error()) require.EqualError(t, err, "chunkedReader: message size exceeded the limit 11 bytes; got: 12 bytes")
} }
func TestChunkedReader_CorruptedFrame(t *testing.T) { func TestChunkedReader_CorruptedFrame(t *testing.T) {
@ -102,5 +102,5 @@ func TestChunkedReader_CorruptedFrame(t *testing.T) {
_, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next() _, err = NewChunkedReader(bytes.NewReader(bs), 20, nil).Next()
require.Error(t, err, "expected malformed frame") require.Error(t, err, "expected malformed frame")
require.Equal(t, "chunkedReader: corrupted frame; checksum mismatch", err.Error()) require.EqualError(t, err, "chunkedReader: corrupted frame; checksum mismatch")
} }

View file

@ -253,8 +253,7 @@ func TestValidateLabelsAndMetricName(t *testing.T) {
t.Run(test.description, func(t *testing.T) { t.Run(test.description, func(t *testing.T) {
err := validateLabelsAndMetricName(test.input) err := validateLabelsAndMetricName(test.input)
if test.expectedErr != "" { if test.expectedErr != "" {
require.Error(t, err) require.EqualError(t, err, test.expectedErr)
require.Equal(t, test.expectedErr, err.Error())
} else { } else {
require.NoError(t, err) require.NoError(t, err)
} }
@ -551,7 +550,7 @@ func TestNegotiateResponseType(t *testing.T) {
_, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20}) _, err = NegotiateResponseType([]prompb.ReadRequest_ResponseType{20})
require.Error(t, err, "expected error due to not supported requested response types") require.Error(t, err, "expected error due to not supported requested response types")
require.Equal(t, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]", err.Error()) require.EqualError(t, err, "server does not support any of the requested response types: [20]; supported: map[SAMPLES:{} STREAMED_XOR_CHUNKS:{}]")
} }
func TestMergeLabels(t *testing.T) { func TestMergeLabels(t *testing.T) {

View file

@ -475,7 +475,9 @@ func TestSampleAndChunkQueryableClient(t *testing.T) {
) )
q, err := c.Querier(tc.mint, tc.maxt) q, err := c.Querier(tc.mint, tc.maxt)
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, q.Close()) defer func() {
require.NoError(t, q.Close())
}()
ss := q.Select(context.Background(), true, nil, tc.matchers...) ss := q.Select(context.Background(), true, nil, tc.matchers...)
require.NoError(t, err) require.NoError(t, err)

View file

@ -306,6 +306,11 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels,
return 0, nil return 0, nil
} }
func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) {
// TODO: Implement
return 0, nil
}
func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write.
// UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now.

View file

@ -915,6 +915,13 @@ func (m *mockAppendable) AppendHistogram(_ storage.SeriesRef, l labels.Labels, t
return 0, nil return 0, nil
} }
func (m *mockAppendable) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
// AppendHistogramCTZeroSample is a no-op for remote write for now.
// TODO(bwplotka/arthursens): Add support for PRW 2.0 for CT zero feature (but also we might
// replace this with in-metadata CT storage, see https://github.com/prometheus/prometheus/issues/14218).
return 0, nil
}
func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) { func (m *mockAppendable) UpdateMetadata(_ storage.SeriesRef, l labels.Labels, mp metadata.Metadata) (storage.SeriesRef, error) {
if m.updateMetadataErr != nil { if m.updateMetadataErr != nil {
return 0, m.updateMetadataErr return 0, m.updateMetadataErr

View file

@ -171,6 +171,34 @@ func (it *listSeriesIterator) Seek(t int64) chunkenc.ValueType {
func (it *listSeriesIterator) Err() error { return nil } func (it *listSeriesIterator) Err() error { return nil }
type listSeriesIteratorWithCopy struct {
*listSeriesIterator
}
func NewListSeriesIteratorWithCopy(samples Samples) chunkenc.Iterator {
return &listSeriesIteratorWithCopy{
listSeriesIterator: &listSeriesIterator{samples: samples, idx: -1},
}
}
func (it *listSeriesIteratorWithCopy) AtHistogram(h *histogram.Histogram) (int64, *histogram.Histogram) {
t, ih := it.listSeriesIterator.AtHistogram(nil)
if h == nil || ih == nil {
return t, ih
}
ih.CopyTo(h)
return t, h
}
func (it *listSeriesIteratorWithCopy) AtFloatHistogram(fh *histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
t, ih := it.listSeriesIterator.AtFloatHistogram(nil)
if fh == nil || ih == nil {
return t, ih
}
ih.CopyTo(fh)
return t, fh
}
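// Illustrative sketch, not part of this diff: the copying iterator lets a caller
// reuse a single histogram buffer across Next() calls without aliasing the
// iterator's stored samples. "samples" and "process" below are hypothetical.
func exampleReuseHistogramBuffer(samples Samples, process func(int64, *histogram.Histogram)) {
it := NewListSeriesIteratorWithCopy(samples)
buf := &histogram.Histogram{}
for vt := it.Next(); vt != chunkenc.ValNone; vt = it.Next() {
if vt != chunkenc.ValHistogram {
continue
}
t, h := it.AtHistogram(buf) // The stored histogram is deep-copied into buf, so h == buf.
process(t, h)               // Mutating h cannot corrupt the iterator's underlying samples.
}
}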
type listChunkSeriesIterator struct { type listChunkSeriesIterator struct {
chks []chunks.Meta chks []chunks.Meta
idx int idx int

View file

@ -972,6 +972,11 @@ func (a *appender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t int
return storage.SeriesRef(series.ref), nil return storage.SeriesRef(series.ref), nil
} }
func (a *appender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
// TODO(bwplotka/arthursens): Implement histogram CT zero sample ingestion in the Agent's appender.
return 0, nil
}
func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) { func (a *appender) UpdateMetadata(storage.SeriesRef, labels.Labels, metadata.Metadata) (storage.SeriesRef, error) {
// TODO: Wire metadata in the Agent's appender. // TODO: Wire metadata in the Agent's appender.
return 0, nil return 0, nil

View file

@ -640,7 +640,9 @@ func Test_ExistingWAL_NextRef(t *testing.T) {
// Create a new storage and see what nextRef is initialized to. // Create a new storage and see what nextRef is initialized to.
db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions()) db, err = Open(log.NewNopLogger(), nil, rs, dbDir, DefaultOptions())
require.NoError(t, err) require.NoError(t, err)
defer require.NoError(t, db.Close()) defer func() {
require.NoError(t, db.Close())
}()
require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL") require.Equal(t, uint64(seriesCount+histogramCount), db.nextRef.Load(), "nextRef should be equal to the number of series written across the entire WAL")
} }

View file

@ -192,7 +192,7 @@ func TestCorruptedChunk(t *testing.T) {
// Check open err. // Check open err.
b, err := OpenBlock(nil, blockDir, nil) b, err := OpenBlock(nil, blockDir, nil)
if tc.openErr != nil { if tc.openErr != nil {
require.Equal(t, tc.openErr.Error(), err.Error()) require.EqualError(t, err, tc.openErr.Error())
return return
} }
defer func() { require.NoError(t, b.Close()) }() defer func() { require.NoError(t, b.Close()) }()
@ -206,7 +206,7 @@ func TestCorruptedChunk(t *testing.T) {
require.True(t, set.Next()) require.True(t, set.Next())
it := set.At().Iterator(nil) it := set.At().Iterator(nil)
require.Equal(t, chunkenc.ValNone, it.Next()) require.Equal(t, chunkenc.ValNone, it.Next())
require.Equal(t, tc.iterErr.Error(), it.Err().Error()) require.EqualError(t, it.Err(), tc.iterErr.Error())
}) })
} }
} }

View file

@ -29,6 +29,7 @@ type Sample interface {
H() *histogram.Histogram H() *histogram.Histogram
FH() *histogram.FloatHistogram FH() *histogram.FloatHistogram
Type() chunkenc.ValueType Type() chunkenc.ValueType
Copy() Sample // Returns a deep copy.
} }
type SampleSlice []Sample type SampleSlice []Sample
@ -70,6 +71,17 @@ func (s sample) Type() chunkenc.ValueType {
} }
} }
func (s sample) Copy() Sample {
c := sample{t: s.t, f: s.f}
if s.h != nil {
c.h = s.h.Copy()
}
if s.fh != nil {
c.fh = s.fh.Copy()
}
return c
}
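// Illustrative sketch, not part of this diff: Copy is a deep copy, so histogram
// pointers in the copy and the original never alias. A hypothetical check:
func exampleCopyIsDeep() {
orig := sample{t: 1, h: &histogram.Histogram{Count: 5}}
dup := orig.Copy().(sample)
dup.h.Count = 42 // Mutates only the copy...
_ = orig.h.Count // ...the original still holds 5.
}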
// GenerateSamples starting at start and counting up numSamples. // GenerateSamples starting at start and counting up numSamples.
func GenerateSamples(start, numSamples int) []Sample { func GenerateSamples(start, numSamples int) []Sample {
return generateSamples(start, numSamples, func(i int) Sample { return generateSamples(start, numSamples, func(i int) Sample {

View file

@ -1045,8 +1045,7 @@ func TestCompaction_populateBlock(t *testing.T) {
} }
err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc) err = blockPopulator.PopulateBlock(c.ctx, c.metrics, c.logger, c.chunkPool, c.mergeFunc, blocks, meta, iw, nopChunkWriter{}, irPostingsFunc)
if tc.expErr != nil { if tc.expErr != nil {
require.Error(t, err) require.EqualError(t, err, tc.expErr.Error())
require.Equal(t, tc.expErr.Error(), err.Error())
return return
} }
require.NoError(t, err) require.NoError(t, err)

View file

@ -2081,6 +2081,17 @@ func (s sample) Type() chunkenc.ValueType {
} }
} }
func (s sample) Copy() chunks.Sample {
c := sample{t: s.t, f: s.f}
if s.h != nil {
c.h = s.h.Copy()
}
if s.fh != nil {
c.fh = s.fh.Copy()
}
return c
}
// memSeries is the in-memory representation of a series. None of its methods // memSeries is the in-memory representation of a series. None of its methods
// are goroutine safe and it is the caller's responsibility to lock it. // are goroutine safe and it is the caller's responsibility to lock it.
type memSeries struct { type memSeries struct {

View file

@ -79,6 +79,16 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t
return a.app.AppendHistogram(ref, l, t, h, fh) return a.app.AppendHistogram(ref, l, t, h, fh)
} }
func (a *initAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if a.app != nil {
return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh)
}
a.head.initTime(t)
a.app = a.head.appender()
return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh)
}
func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
if a.app != nil { if a.app != nil {
return a.app.UpdateMetadata(ref, l, m) return a.app.UpdateMetadata(ref, l, m)
@ -388,7 +398,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64
// storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. // storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation.
func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) {
if ct >= t { if ct >= t {
return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") return 0, storage.ErrCTNewerThanSample
} }
s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
@ -747,6 +757,107 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels
return storage.SeriesRef(s.ref), nil return storage.SeriesRef(s.ref), nil
} }
func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
if !a.head.opts.EnableNativeHistograms.Load() {
return 0, storage.ErrNativeHistogramsDisabled
}
if ct >= t {
return 0, storage.ErrCTNewerThanSample
}
s := a.head.series.getByID(chunks.HeadSeriesRef(ref))
if s == nil {
// Ensure no empty labels have gotten through.
lset = lset.WithoutEmpty()
if lset.IsEmpty() {
return 0, fmt.Errorf("empty labelset: %w", ErrInvalidSample)
}
if l, dup := lset.HasDuplicateLabelNames(); dup {
return 0, fmt.Errorf(`label name "%s" is not unique: %w`, l, ErrInvalidSample)
}
var created bool
var err error
s, created, err = a.head.getOrCreate(lset.Hash(), lset)
if err != nil {
return 0, err
}
if created {
switch {
case h != nil:
s.lastHistogramValue = &histogram.Histogram{}
case fh != nil:
s.lastFloatHistogramValue = &histogram.FloatHistogram{}
}
a.series = append(a.series, record.RefSeries{
Ref: s.ref,
Labels: lset,
})
}
}
switch {
case h != nil:
zeroHistogram := &histogram.Histogram{}
s.Lock()
// Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed.
// We set it to true to make this implementation as close as possible to the float implementation.
isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true)
if err != nil {
s.Unlock()
if errors.Is(err, storage.ErrOutOfOrderSample) {
return 0, storage.ErrOutOfOrderCT
}
return 0, err
}
// OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples.
// This is to prevent the injected zero from being marked as OOO forever.
if isOOO {
s.Unlock()
return 0, storage.ErrOutOfOrderCT
}
s.pendingCommit = true
s.Unlock()
a.histograms = append(a.histograms, record.RefHistogramSample{
Ref: s.ref,
T: ct,
H: zeroHistogram,
})
a.histogramSeries = append(a.histogramSeries, s)
case fh != nil:
zeroFloatHistogram := &histogram.FloatHistogram{}
s.Lock()
// Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed.
// We set it to true to make this implementation as close as possible to the float implementation.
isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples.
if err != nil {
s.Unlock()
if errors.Is(err, storage.ErrOutOfOrderSample) {
return 0, storage.ErrOutOfOrderCT
}
return 0, err
}
// OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples.
// This is to prevent the injected zero from being marked as OOO forever.
if isOOO {
s.Unlock()
return 0, storage.ErrOutOfOrderCT
}
s.pendingCommit = true
s.Unlock()
a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{
Ref: s.ref,
T: ct,
FH: zeroFloatHistogram,
})
a.floatHistogramSeries = append(a.floatHistogramSeries, s)
}
if ct > a.maxt {
a.maxt = ct
}
return storage.SeriesRef(s.ref), nil
}
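// Illustrative sketch, not part of this diff: how a caller such as a scrape loop
// would use the new method: inject the synthetic zero sample at the created
// timestamp first, then append the real histogram. Since this diff adds
// AppendHistogramCTZeroSample to every storage.Appender implementation, a plain
// storage.Appender is assumed here; the inputs are hypothetical.
func exampleAppendHistogramWithCT(app storage.Appender, lbls labels.Labels, t, ct int64, h *histogram.Histogram) error {
ref, err := app.AppendHistogramCTZeroSample(0, lbls, t, ct, h, nil)
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
return err // ErrOutOfOrderCT is expected after the first scrape and safe to ignore.
}
if _, err = app.AppendHistogram(ref, lbls, t, h, nil); err != nil {
return err
}
return nil
}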
// UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't
// use getOrCreate or make any of the lset sanity checks that Append does. // use getOrCreate or make any of the lset sanity checks that Append does.
func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) {

View file

@ -2414,8 +2414,7 @@ func TestAddDuplicateLabelName(t *testing.T) {
add := func(labels labels.Labels, labelName string) { add := func(labels labels.Labels, labelName string) {
app := h.Appender(context.Background()) app := h.Appender(context.Background())
_, err := app.Append(0, labels, 0, 0) _, err := app.Append(0, labels, 0, 0)
require.Error(t, err) require.EqualError(t, err, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName))
require.Equal(t, fmt.Sprintf(`label name "%s" is not unique: invalid sample`, labelName), err.Error())
} }
add(labels.FromStrings("a", "c", "a", "b"), "a") add(labels.FromStrings("a", "c", "a", "b"), "a")
@ -6281,10 +6280,14 @@ func TestHeadAppender_AppendFloatWithSameTimestampAsPreviousHistogram(t *testing
require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0)) require.ErrorIs(t, err, storage.NewDuplicateHistogramToFloatErr(2_000, 10.0))
} }
func TestHeadAppender_AppendCTZeroSample(t *testing.T) { func TestHeadAppender_AppendCT(t *testing.T) {
testHistogram := tsdbutil.GenerateTestHistogram(1)
testFloatHistogram := tsdbutil.GenerateTestFloatHistogram(1)
type appendableSamples struct { type appendableSamples struct {
ts int64 ts int64
val float64 fSample float64
h *histogram.Histogram
fh *histogram.FloatHistogram
ct int64 ct int64
} }
for _, tc := range []struct { for _, tc := range []struct {
@ -6293,20 +6296,10 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
expectedSamples []chunks.Sample expectedSamples []chunks.Sample
}{ }{
{ {
name: "In order ct+normal sample", name: "In order ct+normal sample/floatSample",
appendableSamples: []appendableSamples{ appendableSamples: []appendableSamples{
{ts: 100, val: 10, ct: 1}, {ts: 100, fSample: 10, ct: 1},
}, {ts: 101, fSample: 10, ct: 1},
expectedSamples: []chunks.Sample{
sample{t: 1, f: 0},
sample{t: 100, f: 10},
},
},
{
name: "Consecutive appends with same ct ignore ct",
appendableSamples: []appendableSamples{
{ts: 100, val: 10, ct: 1},
{ts: 101, val: 10, ct: 1},
}, },
expectedSamples: []chunks.Sample{ expectedSamples: []chunks.Sample{
sample{t: 1, f: 0}, sample{t: 1, f: 0},
@ -6315,10 +6308,86 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
}, },
}, },
{ {
name: "Consecutive appends with newer ct do not ignore ct", name: "In order ct+normal sample/histogram",
appendableSamples: []appendableSamples{ appendableSamples: []appendableSamples{
{ts: 100, val: 10, ct: 1}, {ts: 100, h: testHistogram, ct: 1},
{ts: 102, val: 10, ct: 101}, {ts: 101, h: testHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset},
}
}(),
},
{
name: "In order ct+normal sample/floathistogram",
appendableSamples: []appendableSamples{
{ts: 100, fh: testFloatHistogram, ct: 1},
{ts: 101, fh: testFloatHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset},
}
}(),
},
{
name: "Consecutive appends with same ct ignore ct/floatSample",
appendableSamples: []appendableSamples{
{ts: 100, fSample: 10, ct: 1},
{ts: 101, fSample: 10, ct: 1},
},
expectedSamples: []chunks.Sample{
sample{t: 1, f: 0},
sample{t: 100, f: 10},
sample{t: 101, f: 10},
},
},
{
name: "Consecutive appends with same ct ignore ct/histogram",
appendableSamples: []appendableSamples{
{ts: 100, h: testHistogram, ct: 1},
{ts: 101, h: testHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset},
}
}(),
},
{
name: "Consecutive appends with same ct ignore ct/floathistogram",
appendableSamples: []appendableSamples{
{ts: 100, fh: testFloatHistogram, ct: 1},
{ts: 101, fh: testFloatHistogram, ct: 1},
},
expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset},
}
}(),
},
{
name: "Consecutive appends with newer ct do not ignore ct/floatSample",
appendableSamples: []appendableSamples{
{ts: 100, fSample: 10, ct: 1},
{ts: 102, fSample: 10, ct: 101},
}, },
expectedSamples: []chunks.Sample{ expectedSamples: []chunks.Sample{
sample{t: 1, f: 0}, sample{t: 1, f: 0},
@ -6328,10 +6397,36 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
}, },
}, },
{ {
name: "CT equals to previous sample timestamp is ignored", name: "Consecutive appends with newer ct do not ignore ct/histogram",
appendableSamples: []appendableSamples{ appendableSamples: []appendableSamples{
{ts: 100, val: 10, ct: 1}, {ts: 100, h: testHistogram, ct: 1},
{ts: 101, val: 10, ct: 100}, {ts: 102, h: testHistogram, ct: 101},
},
expectedSamples: []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &histogram.Histogram{CounterResetHint: histogram.CounterReset}},
sample{t: 102, h: testHistogram},
},
},
{
name: "Consecutive appends with newer ct do not ignore ct/floathistogram",
appendableSamples: []appendableSamples{
{ts: 100, fh: testFloatHistogram, ct: 1},
{ts: 102, fh: testFloatHistogram, ct: 101},
},
expectedSamples: []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &histogram.FloatHistogram{CounterResetHint: histogram.CounterReset}},
sample{t: 102, fh: testFloatHistogram},
},
},
{
name: "CT equals to previous sample timestamp is ignored/floatSample",
appendableSamples: []appendableSamples{
{ts: 100, fSample: 10, ct: 1},
{ts: 101, fSample: 10, ct: 100},
}, },
expectedSamples: []chunks.Sample{ expectedSamples: []chunks.Sample{
sample{t: 1, f: 0}, sample{t: 1, f: 0},
@ -6339,6 +6434,38 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
sample{t: 101, f: 10}, sample{t: 101, f: 10},
}, },
}, },
{
name: "CT equals to previous sample timestamp is ignored/histogram",
appendableSamples: []appendableSamples{
{ts: 100, h: testHistogram, ct: 1},
{ts: 101, h: testHistogram, ct: 100},
},
expectedSamples: func() []chunks.Sample {
hNoCounterReset := *testHistogram
hNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, h: &histogram.Histogram{}},
sample{t: 100, h: testHistogram},
sample{t: 101, h: &hNoCounterReset},
}
}(),
},
{
name: "CT equals to previous sample timestamp is ignored/floathistogram",
appendableSamples: []appendableSamples{
{ts: 100, fh: testFloatHistogram, ct: 1},
{ts: 101, fh: testFloatHistogram, ct: 100},
},
expectedSamples: func() []chunks.Sample {
fhNoCounterReset := *testFloatHistogram
fhNoCounterReset.CounterResetHint = histogram.NotCounterReset
return []chunks.Sample{
sample{t: 1, fh: &histogram.FloatHistogram{}},
sample{t: 100, fh: testFloatHistogram},
sample{t: 101, fh: &fhNoCounterReset},
}
}(),
},
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false) h, _ := newTestHead(t, DefaultBlockDuration, wlog.CompressionNone, false)
@ -6348,11 +6475,22 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
a := h.Appender(context.Background()) a := h.Appender(context.Background())
lbls := labels.FromStrings("foo", "bar") lbls := labels.FromStrings("foo", "bar")
for _, sample := range tc.appendableSamples { for _, sample := range tc.appendableSamples {
// Append float if it's a float test case
if sample.fSample != 0 {
_, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct) _, err := a.AppendCTZeroSample(0, lbls, sample.ts, sample.ct)
require.NoError(t, err) require.NoError(t, err)
_, err = a.Append(0, lbls, sample.ts, sample.val) _, err = a.Append(0, lbls, sample.ts, sample.fSample)
require.NoError(t, err) require.NoError(t, err)
} }
// Append histograms if it's a histogram test case
if sample.h != nil || sample.fh != nil {
ref, err := a.AppendHistogramCTZeroSample(0, lbls, sample.ts, sample.ct, sample.h, sample.fh)
require.NoError(t, err)
_, err = a.AppendHistogram(ref, lbls, sample.ts, sample.h, sample.fh)
require.NoError(t, err)
}
}
require.NoError(t, a.Commit()) require.NoError(t, a.Commit())
q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64) q, err := NewBlockQuerier(h, math.MinInt64, math.MaxInt64)

View file

@ -26,6 +26,7 @@ import (
"sync" "sync"
"github.com/bboreham/go-loser" "github.com/bboreham/go-loser"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
@ -292,8 +293,17 @@ func (p *MemPostings) EnsureOrder(numberOfConcurrentProcesses int) {
func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) { func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected map[labels.Label]struct{}) {
p.mtx.Lock() p.mtx.Lock()
defer p.mtx.Unlock() defer p.mtx.Unlock()
if len(p.m) == 0 || len(deleted) == 0 {
return
}
process := func(l labels.Label) { // Deleting label names mutates p.m map, so it should be done from a single goroutine after nobody else is reading it.
deleteLabelNames := make(chan string, len(p.m))
process, wait := processWithBoundedParallelismAndConsistentWorkers(
runtime.GOMAXPROCS(0),
func(l labels.Label) uint64 { return xxhash.Sum64String(l.Name) },
func(l labels.Label) {
orig := p.m[l.Name][l.Value] orig := p.m[l.Name][l.Value]
repl := make([]storage.SeriesRef, 0, len(orig)) repl := make([]storage.SeriesRef, 0, len(orig))
for _, id := range orig { for _, id := range orig {
@ -305,17 +315,54 @@ func (p *MemPostings) Delete(deleted map[storage.SeriesRef]struct{}, affected ma
p.m[l.Name][l.Value] = repl p.m[l.Name][l.Value] = repl
} else { } else {
delete(p.m[l.Name], l.Value) delete(p.m[l.Name], l.Value)
// Delete the key if we removed all values.
if len(p.m[l.Name]) == 0 { if len(p.m[l.Name]) == 0 {
delete(p.m, l.Name) // Delete the key if we removed all values.
} deleteLabelNames <- l.Name
} }
} }
},
)
for l := range affected { for l := range affected {
process(l) process(l)
} }
process(allPostingsKey) process(allPostingsKey)
wait()
// Close deleteLabelNames channel and delete the label names requested.
close(deleteLabelNames)
for name := range deleteLabelNames {
delete(p.m, name)
}
}
// processWithBoundedParallelismAndConsistentWorkers will call f() with bounded parallelism,
// making sure that elements with same hash(T) will always be processed by the same worker.
// Call process() to add more jobs to process, and once finished adding, call wait() to ensure that all jobs are processed.
func processWithBoundedParallelismAndConsistentWorkers[T any](workers int, hash func(T) uint64, f func(T)) (process func(T), wait func()) {
wg := &sync.WaitGroup{}
jobs := make([]chan T, workers)
for i := 0; i < workers; i++ {
wg.Add(1)
jobs[i] = make(chan T, 128)
go func(jobs <-chan T) {
defer wg.Done()
for l := range jobs {
f(l)
}
}(jobs[i])
}
process = func(job T) {
jobs[hash(job)%uint64(workers)] <- job
}
wait = func() {
for i := range jobs {
close(jobs[i])
}
wg.Wait()
}
return process, wait
} }
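// Illustrative sketch, not part of this diff: the helper routes jobs with equal
// hashes to the same worker, so per-key state needs no extra locking. A
// hypothetical use with string keys:
func exampleConsistentWorkers(keys []string) {
const workers = 4
counts := make([]int, workers) // Each slot is touched by exactly one worker, so no mutex is needed.
process, wait := processWithBoundedParallelismAndConsistentWorkers(
workers,
func(k string) uint64 { return xxhash.Sum64String(k) },
func(k string) { counts[xxhash.Sum64String(k)%workers]++ },
)
for _, k := range keys {
process(k)
}
wait()
}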
// Iter calls f for each postings list. It aborts if f returns an error and returns it. // Iter calls f for each postings list. It aborts if f returns an error and returns it.

View file

@ -973,6 +973,7 @@ func TestMemPostingsStats(t *testing.T) {
} }
func TestMemPostings_Delete(t *testing.T) { func TestMemPostings_Delete(t *testing.T) {
t.Run("some postings", func(t *testing.T) {
p := NewMemPostings() p := NewMemPostings()
p.Add(1, labels.FromStrings("lbl1", "a")) p.Add(1, labels.FromStrings("lbl1", "a"))
p.Add(2, labels.FromStrings("lbl1", "b")) p.Add(2, labels.FromStrings("lbl1", "b"))
@ -1004,6 +1005,37 @@ func TestMemPostings_Delete(t *testing.T) {
expanded, err = ExpandPostings(deleted) expanded, err = ExpandPostings(deleted)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, expanded, "expected empty postings, got %v", expanded) require.Empty(t, expanded, "expected empty postings, got %v", expanded)
})
t.Run("all postings", func(t *testing.T) {
p := NewMemPostings()
p.Add(1, labels.FromStrings("lbl1", "a"))
p.Add(2, labels.FromStrings("lbl1", "b"))
p.Add(3, labels.FromStrings("lbl2", "a"))
deletedRefs := map[storage.SeriesRef]struct{}{1: {}, 2: {}, 3: {}}
affectedLabels := map[labels.Label]struct{}{
{Name: "lbl1", Value: "a"}: {},
{Name: "lbl1", Value: "b"}: {},
{Name: "lbl1", Value: "c"}: {},
}
p.Delete(deletedRefs, affectedLabels)
after := p.Get(allPostingsKey.Name, allPostingsKey.Value)
expanded, err := ExpandPostings(after)
require.NoError(t, err)
require.Empty(t, expanded)
})
t.Run("nothing on empty mempostings", func(t *testing.T) {
p := NewMemPostings()
deletedRefs := map[storage.SeriesRef]struct{}{}
affectedLabels := map[labels.Label]struct{}{}
p.Delete(deletedRefs, affectedLabels)
after := p.Get(allPostingsKey.Name, allPostingsKey.Value)
expanded, err := ExpandPostings(after)
require.NoError(t, err)
require.Empty(t, expanded)
})
} }
// BenchmarkMemPostings_Delete is quite heavy, so consider running it with // BenchmarkMemPostings_Delete is quite heavy, so consider running it with
@ -1025,7 +1057,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) {
return s return s
} }
const total = 1e6 const total = 2e6
allSeries := [total]labels.Labels{} allSeries := [total]labels.Labels{}
nameValues := make([]string, 0, 100) nameValues := make([]string, 0, 100)
for i := 0; i < total; i++ { for i := 0; i < total; i++ {

View file

@ -0,0 +1,185 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notifications
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const (
ConfigurationUnsuccessful = "Configuration reload has failed."
StartingUp = "Prometheus is starting and replaying the write-ahead log (WAL)."
ShuttingDown = "Prometheus is shutting down and gracefully stopping all operations."
)
// Notification represents an individual notification message.
type Notification struct {
Text string `json:"text"`
Date time.Time `json:"date"`
Active bool `json:"active"`
}
// Notifications stores a list of Notification objects.
// It also manages live subscribers that receive notifications via channels.
type Notifications struct {
mu sync.Mutex
notifications []Notification
subscribers map[chan Notification]struct{} // Active subscribers.
maxSubscribers int
subscriberGauge prometheus.Gauge
notificationsSent prometheus.Counter
notificationsDropped prometheus.Counter
}
// NewNotifications creates a new Notifications instance.
func NewNotifications(maxSubscribers int, reg prometheus.Registerer) *Notifications {
n := &Notifications{
subscribers: make(map[chan Notification]struct{}),
maxSubscribers: maxSubscribers,
subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "notification_active_subscribers",
Help: "The current number of active notification subscribers.",
}),
notificationsSent: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "notification_updates_sent_total",
Help: "Total number of notification updates sent.",
}),
notificationsDropped: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: "prometheus",
Subsystem: "api",
Name: "notification_updates_dropped_total",
Help: "Total number of notification updates dropped.",
}),
}
if reg != nil {
reg.MustRegister(n.subscriberGauge, n.notificationsSent, n.notificationsDropped)
}
return n
}
// AddNotification adds a new notification or updates the timestamp if it already exists.
func (n *Notifications) AddNotification(text string) {
n.mu.Lock()
defer n.mu.Unlock()
for i, notification := range n.notifications {
if notification.Text == text {
n.notifications[i].Date = time.Now()
n.notifySubscribers(n.notifications[i])
return
}
}
newNotification := Notification{
Text: text,
Date: time.Now(),
Active: true,
}
n.notifications = append(n.notifications, newNotification)
n.notifySubscribers(newNotification)
}
// notifySubscribers sends a notification to all active subscribers.
func (n *Notifications) notifySubscribers(notification Notification) {
for sub := range n.subscribers {
// Non-blocking send to avoid subscriber blocking issues.
n.notificationsSent.Inc()
select {
case sub <- notification:
// Notification sent to the subscriber.
default:
// Drop the notification if the subscriber's channel is full.
n.notificationsDropped.Inc()
}
}
}
// DeleteNotification removes the first notification that matches the provided text.
// The deleted notification is sent to subscribers with Active: false before being removed.
func (n *Notifications) DeleteNotification(text string) {
n.mu.Lock()
defer n.mu.Unlock()
// Iterate through the notifications to find the matching text.
for i, notification := range n.notifications {
if notification.Text == text {
// Mark the notification as inactive and notify subscribers.
notification.Active = false
n.notifySubscribers(notification)
// Remove the notification from the list.
n.notifications = append(n.notifications[:i], n.notifications[i+1:]...)
return
}
}
}
// Get returns a copy of the list of notifications for safe access outside the struct.
func (n *Notifications) Get() []Notification {
n.mu.Lock()
defer n.mu.Unlock()
// Return a copy of the notifications slice to avoid modifying the original slice outside.
notificationsCopy := make([]Notification, len(n.notifications))
copy(notificationsCopy, n.notifications)
return notificationsCopy
}
// Sub allows a client to subscribe to live notifications.
// It returns a channel where the subscriber will receive notifications and a function to unsubscribe.
// Each subscriber has its own goroutine to handle notifications and prevent blocking.
func (n *Notifications) Sub() (<-chan Notification, func(), bool) {
n.mu.Lock()
defer n.mu.Unlock()
if len(n.subscribers) >= n.maxSubscribers {
return nil, nil, false
}
ch := make(chan Notification, 10) // Buffered channel to prevent blocking.
// Add the new subscriber to the list.
n.subscribers[ch] = struct{}{}
n.subscriberGauge.Set(float64(len(n.subscribers)))
// Send all current notifications to the new subscriber.
for _, notification := range n.notifications {
ch <- notification
}
// Unsubscribe function to remove the channel from subscribers.
unsubscribe := func() {
n.mu.Lock()
defer n.mu.Unlock()
// Close the channel and remove it from the subscribers map.
close(ch)
delete(n.subscribers, ch)
n.subscriberGauge.Set(float64(len(n.subscribers)))
}
return ch, unsubscribe, true
}
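// Illustrative sketch, not part of this diff: the typical lifecycle of this API
// as wired up elsewhere in the change. The registry and limit are hypothetical.
func exampleNotificationsUsage() {
notifs := NewNotifications(16, prometheus.NewRegistry())
sub, unsubscribe, ok := notifs.Sub()
if !ok {
return // Subscriber limit reached; fall back to polling Get().
}
defer unsubscribe()
notifs.AddNotification(ConfigurationUnsuccessful) // Broadcast to all subscribers.
n := <-sub   // The live update arrives on the channel.
_ = n.Active // true here; DeleteNotification re-broadcasts with Active set to false.
notifs.DeleteNotification(ConfigurationUnsuccessful)
}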

View file

@ -0,0 +1,223 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notifications
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestNotificationLifecycle tests adding, modifying, and deleting notifications.
func TestNotificationLifecycle(t *testing.T) {
notifs := NewNotifications(10, nil)
// Add a notification.
notifs.AddNotification("Test Notification 1")
// Check if the notification was added.
notifications := notifs.Get()
require.Len(t, notifications, 1, "Expected 1 notification after addition.")
require.Equal(t, "Test Notification 1", notifications[0].Text, "Notification text mismatch.")
require.True(t, notifications[0].Active, "Expected notification to be active.")
// Modify the notification.
notifs.AddNotification("Test Notification 1")
notifications = notifs.Get()
require.Len(t, notifications, 1, "Expected 1 notification after modification.")
// Delete the notification.
notifs.DeleteNotification("Test Notification 1")
notifications = notifs.Get()
require.Empty(t, notifications, "Expected no notifications after deletion.")
}
// TestSubscriberReceivesNotifications tests that a subscriber receives notifications, including modifications and deletions.
func TestSubscriberReceivesNotifications(t *testing.T) {
notifs := NewNotifications(10, nil)
// Subscribe to notifications.
sub, unsubscribe, ok := notifs.Sub()
require.True(t, ok)
var wg sync.WaitGroup
wg.Add(1)
receivedNotifications := make([]Notification, 0)
// Goroutine to listen for notifications.
go func() {
defer wg.Done()
for notification := range sub {
receivedNotifications = append(receivedNotifications, notification)
}
}()
// Add notifications.
notifs.AddNotification("Test Notification 1")
notifs.AddNotification("Test Notification 2")
// Modify a notification.
notifs.AddNotification("Test Notification 1")
// Delete a notification.
notifs.DeleteNotification("Test Notification 2")
// Wait for notifications to propagate.
time.Sleep(100 * time.Millisecond)
unsubscribe()
wg.Wait() // Wait for the subscriber goroutine to finish.
// Verify that we received the expected number of notifications.
require.Len(t, receivedNotifications, 4, "Expected 4 notifications (2 active, 1 modified, 1 deleted).")
// Check the content and state of received notifications.
expected := []struct {
Text string
Active bool
}{
{"Test Notification 1", true},
{"Test Notification 2", true},
{"Test Notification 1", true},
{"Test Notification 2", false},
}
for i, n := range receivedNotifications {
require.Equal(t, expected[i].Text, n.Text, "Notification text mismatch at index %d.", i)
require.Equal(t, expected[i].Active, n.Active, "Notification active state mismatch at index %d.", i)
}
}
// TestMultipleSubscribers tests that multiple subscribers receive notifications independently.
func TestMultipleSubscribers(t *testing.T) {
notifs := NewNotifications(10, nil)
// Subscribe two subscribers to notifications.
sub1, unsubscribe1, ok1 := notifs.Sub()
require.True(t, ok1)
sub2, unsubscribe2, ok2 := notifs.Sub()
require.True(t, ok2)
var wg sync.WaitGroup
wg.Add(2)
receivedSub1 := make([]Notification, 0)
receivedSub2 := make([]Notification, 0)
// Goroutine for subscriber 1.
go func() {
defer wg.Done()
for notification := range sub1 {
receivedSub1 = append(receivedSub1, notification)
}
}()
// Goroutine for subscriber 2.
go func() {
defer wg.Done()
for notification := range sub2 {
receivedSub2 = append(receivedSub2, notification)
}
}()
// Add and delete notifications.
notifs.AddNotification("Test Notification 1")
notifs.DeleteNotification("Test Notification 1")
// Wait for notifications to propagate.
time.Sleep(100 * time.Millisecond)
// Unsubscribe both.
unsubscribe1()
unsubscribe2()
wg.Wait()
// Both subscribers should have received the same 2 notifications.
require.Len(t, receivedSub1, 2, "Expected 2 notifications for subscriber 1.")
require.Len(t, receivedSub2, 2, "Expected 2 notifications for subscriber 2.")
// Verify that both subscribers received the same notifications.
for i := 0; i < 2; i++ {
require.Equal(t, receivedSub1[i], receivedSub2[i], "Subscriber notification mismatch at index %d.", i)
}
}
// TestUnsubscribe tests that unsubscribing prevents further notifications from being received.
func TestUnsubscribe(t *testing.T) {
notifs := NewNotifications(10, nil)
// Subscribe to notifications.
sub, unsubscribe, ok := notifs.Sub()
require.True(t, ok)
var wg sync.WaitGroup
wg.Add(1)
receivedNotifications := make([]Notification, 0)
// Goroutine to listen for notifications.
go func() {
defer wg.Done()
for notification := range sub {
receivedNotifications = append(receivedNotifications, notification)
}
}()
// Add a notification and then unsubscribe.
notifs.AddNotification("Test Notification 1")
time.Sleep(100 * time.Millisecond) // Allow time for notification delivery.
unsubscribe() // Unsubscribe.
// Add another notification after unsubscribing.
notifs.AddNotification("Test Notification 2")
// Wait for the subscriber goroutine to finish.
wg.Wait()
// Only the first notification should have been received.
require.Len(t, receivedNotifications, 1, "Expected 1 notification before unsubscribe.")
require.Equal(t, "Test Notification 1", receivedNotifications[0].Text, "Unexpected notification text.")
}
// TestMaxSubscribers tests that exceeding the max subscribers limit prevents additional subscriptions.
func TestMaxSubscribers(t *testing.T) {
maxSubscribers := 2
notifs := NewNotifications(maxSubscribers, nil)
// Subscribe the maximum number of subscribers.
_, unsubscribe1, ok1 := notifs.Sub()
require.True(t, ok1, "Expected first subscription to succeed.")
_, unsubscribe2, ok2 := notifs.Sub()
require.True(t, ok2, "Expected second subscription to succeed.")
// Try to subscribe more than the max allowed.
_, _, ok3 := notifs.Sub()
require.False(t, ok3, "Expected third subscription to fail due to max subscriber limit.")
// Unsubscribe one subscriber and try again.
unsubscribe1()
_, unsubscribe4, ok4 := notifs.Sub()
require.True(t, ok4, "Expected subscription to succeed after unsubscribing a subscriber.")
// Clean up the subscriptions.
unsubscribe2()
unsubscribe4()
}

View file

@ -15,21 +15,56 @@ package testutil
import ( import (
"net" "net"
"sync"
"testing" "testing"
) )
var (
mu sync.Mutex
usedPorts []int
)
// RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. // RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing.
func RandomUnprivilegedPort(t *testing.T) int { func RandomUnprivilegedPort(t *testing.T) int {
t.Helper() t.Helper()
mu.Lock()
defer mu.Unlock()
port, err := getPort()
if err != nil {
t.Fatal(err)
}
for portWasUsed(port) {
port, err = getPort()
if err != nil {
t.Fatal(err)
}
}
usedPorts = append(usedPorts, port)
return port
}
func portWasUsed(port int) bool {
for _, usedPort := range usedPorts {
if port == usedPort {
return true
}
}
return false
}
func getPort() (int, error) {
listener, err := net.Listen("tcp", ":0") listener, err := net.Listen("tcp", ":0")
if err != nil { if err != nil {
t.Fatalf("Listening on random port: %v", err) return 0, err
} }
if err := listener.Close(); err != nil { if err := listener.Close(); err != nil {
t.Fatalf("Closing listener: %v", err) return 0, err
} }
return listener.Addr().(*net.TCPAddr).Port return listener.Addr().(*net.TCPAddr).Port, nil
} }
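// Illustrative sketch, not part of this diff: the guarantee this refactor adds is
// that ports never repeat within one test process, even before anything binds.
func exampleDistinctPorts(t *testing.T) {
a := RandomUnprivilegedPort(t)
b := RandomUnprivilegedPort(t)
if a == b {
t.Fatalf("got duplicate port %d; RandomUnprivilegedPort must not repeat", a)
}
}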

View file

@ -15,6 +15,7 @@ package v1
import ( import (
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"math" "math"
@ -53,6 +54,7 @@ import (
"github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
"github.com/prometheus/prometheus/util/httputil" "github.com/prometheus/prometheus/util/httputil"
"github.com/prometheus/prometheus/util/notifications"
"github.com/prometheus/prometheus/util/stats" "github.com/prometheus/prometheus/util/stats"
) )
@ -212,6 +214,8 @@ type API struct {
gatherer prometheus.Gatherer gatherer prometheus.Gatherer
isAgent bool isAgent bool
statsRenderer StatsRenderer statsRenderer StatsRenderer
notificationsGetter func() []notifications.Notification
notificationsSub func() (<-chan notifications.Notification, func(), bool)
remoteWriteHandler http.Handler remoteWriteHandler http.Handler
remoteReadHandler http.Handler remoteReadHandler http.Handler
@ -245,6 +249,8 @@ func NewAPI(
corsOrigin *regexp.Regexp, corsOrigin *regexp.Regexp,
runtimeInfo func() (RuntimeInfo, error), runtimeInfo func() (RuntimeInfo, error),
buildInfo *PrometheusVersion, buildInfo *PrometheusVersion,
notificationsGetter func() []notifications.Notification,
notificationsSub func() (<-chan notifications.Notification, func(), bool),
gatherer prometheus.Gatherer, gatherer prometheus.Gatherer,
registerer prometheus.Registerer, registerer prometheus.Registerer,
statsRenderer StatsRenderer, statsRenderer StatsRenderer,
@ -277,6 +283,8 @@ func NewAPI(
gatherer: gatherer, gatherer: gatherer,
isAgent: isAgent, isAgent: isAgent,
statsRenderer: DefaultStatsRenderer, statsRenderer: DefaultStatsRenderer,
notificationsGetter: notificationsGetter,
notificationsSub: notificationsSub,
remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame),
} }
@ -390,6 +398,8 @@ func (api *API) Register(r *route.Router) {
r.Get("/status/flags", wrap(api.serveFlags)) r.Get("/status/flags", wrap(api.serveFlags))
r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus))
r.Get("/status/walreplay", api.serveWALReplayStatus) r.Get("/status/walreplay", api.serveWALReplayStatus)
r.Get("/notifications", api.notifications)
r.Get("/notifications/live", api.notificationsSSE)
r.Post("/read", api.ready(api.remoteRead)) r.Post("/read", api.ready(api.remoteRead))
r.Post("/write", api.ready(api.remoteWrite)) r.Post("/write", api.ready(api.remoteWrite))
r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite)) r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite))
@ -824,12 +834,22 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) {
} }
var ( var (
// MinTime is the default timestamp used for the begin of optional time ranges. // MinTime is the default timestamp used for the start of optional time ranges.
// Exposed to let downstream projects to reference it. // Exposed to let downstream projects reference it.
//
// Historical note: This should just be time.Unix(math.MinInt64/1000, 0).UTC(),
// but it was set to a higher value in the past due to a misunderstanding.
// The value is still low enough for practical purposes, so we don't want
// to change it now, avoiding confusion for importers of this variable.
MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
// MaxTime is the default timestamp used for the end of optional time ranges. // MaxTime is the default timestamp used for the end of optional time ranges.
// Exposed to let downstream projects to reference it. // Exposed to let downstream projects reference it.
//
// Historical note: This should just be time.Unix(math.MaxInt64/1000, 0).UTC(),
// but it was set to a lower value in the past due to a misunderstanding.
// The value is still high enough for practical purposes, so we don't want
// to change it now, avoiding confusion for importers of this variable.
MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
minTimeFormatted = MinTime.Format(time.RFC3339Nano) minTimeFormatted = MinTime.Format(time.RFC3339Nano)
@ -1668,6 +1688,57 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) {
}, nil, "") }, nil, "")
} }
func (api *API) notifications(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, api.CORSOrigin, r)
api.respond(w, r, api.notificationsGetter(), nil, "")
}
func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, api.CORSOrigin, r)
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
// Subscribe to notifications.
notifications, unsubscribe, ok := api.notificationsSub()
if !ok {
w.WriteHeader(http.StatusNoContent)
return
}
defer unsubscribe()
// Set up a flusher to push the response to the client.
flusher, ok := w.(http.Flusher)
if !ok {
http.Error(w, "Streaming unsupported", http.StatusInternalServerError)
return
}
// Flush the response to ensure the headers are sent immediately and the
// eventSource onopen is triggered client-side.
flusher.Flush()
for {
select {
case notification := <-notifications:
// Marshal the notification to JSON.
jsonData, err := json.Marshal(notification)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
continue
}
// Write the event data in SSE format with JSON content.
fmt.Fprintf(w, "data: %s\n\n", jsonData)
// Flush the response to ensure the data is sent immediately.
flusher.Flush()
case <-r.Context().Done():
return
}
}
}
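// Illustrative sketch, not part of this diff: consuming the SSE endpoint from a
// Go client using only the standard library. The base URL is hypothetical, and
// the bufio and strings imports are assumed in addition to this file's imports.
func exampleConsumeNotificationsSSE(ctx context.Context, base string) error {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, base+"/api/v1/notifications/live", nil)
if err != nil {
return err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent {
return nil // Subscriber limit reached; fall back to polling /api/v1/notifications.
}
scanner := bufio.NewScanner(resp.Body)
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, "data: ") {
continue // Skip the blank lines separating SSE events.
}
var n notifications.Notification
if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &n); err != nil {
return err
}
fmt.Printf("notification: %q active=%v\n", n.Text, n.Active)
}
return scanner.Err()
}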
func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) {
// This is only really for tests - this will never be nil IRL. // This is only really for tests - this will never be nil IRL.
if api.remoteReadHandler != nil { if api.remoteReadHandler != nil {
@ -1689,7 +1760,7 @@ func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) {
if api.otlpWriteHandler != nil { if api.otlpWriteHandler != nil {
api.otlpWriteHandler.ServeHTTP(w, r) api.otlpWriteHandler.ServeHTTP(w, r)
} else { } else {
http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound) http.Error(w, "otlp write receiver needs to be enabled with --web.enable-otlp-receiver", http.StatusNotFound)
} }
} }

View file

@ -4176,7 +4176,7 @@ func TestExtractQueryOpts(t *testing.T) {
if test.err == nil { if test.err == nil {
require.NoError(t, err) require.NoError(t, err)
} else { } else {
require.Equal(t, test.err.Error(), err.Error()) require.EqualError(t, err, test.err.Error())
} }
}) })
} }

View file

@ -134,6 +134,8 @@ func createPrometheusAPI(t *testing.T, q storage.SampleAndChunkQueryable) *route
regexp.MustCompile(".*"), regexp.MustCompile(".*"),
func() (RuntimeInfo, error) { return RuntimeInfo{}, errors.New("not implemented") }, func() (RuntimeInfo, error) { return RuntimeInfo{}, errors.New("not implemented") },
&PrometheusVersion{}, &PrometheusVersion{},
nil,
nil,
prometheus.DefaultGatherer, prometheus.DefaultGatherer,
nil, nil,
nil, nil,

View file

@ -12,30 +12,31 @@
"test": "vitest" "test": "vitest"
}, },
"dependencies": { "dependencies": {
"@codemirror/autocomplete": "^6.18.0", "@codemirror/autocomplete": "^6.18.1",
"@codemirror/language": "^6.10.2", "@codemirror/language": "^6.10.2",
"@codemirror/lint": "^6.8.1", "@codemirror/lint": "^6.8.1",
"@codemirror/state": "^6.4.1", "@codemirror/state": "^6.4.1",
"@codemirror/view": "^6.33.0", "@codemirror/view": "^6.34.1",
"@floating-ui/dom": "^1.6.7", "@floating-ui/dom": "^1.6.7",
"@lezer/common": "^1.2.1", "@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.1", "@lezer/highlight": "^1.2.1",
"@mantine/code-highlight": "^7.11.2", "@mantine/code-highlight": "^7.13.1",
"@mantine/core": "^7.11.2", "@mantine/core": "^7.11.2",
"@mantine/dates": "^7.11.2", "@mantine/dates": "^7.13.1",
"@mantine/hooks": "^7.11.2", "@mantine/hooks": "^7.11.2",
"@mantine/notifications": "^7.11.2", "@mantine/notifications": "^7.13.1",
"@microsoft/fetch-event-source": "^2.0.1",
"@nexucis/fuzzy": "^0.5.1", "@nexucis/fuzzy": "^0.5.1",
"@nexucis/kvsearch": "^0.9.1", "@nexucis/kvsearch": "^0.9.1",
"@prometheus-io/codemirror-promql": "0.300.0-beta.0", "@prometheus-io/codemirror-promql": "0.300.0-beta.0",
"@reduxjs/toolkit": "^2.2.1", "@reduxjs/toolkit": "^2.2.1",
"@tabler/icons-react": "^2.47.0", "@tabler/icons-react": "^3.19.0",
"@tanstack/react-query": "^5.22.2", "@tanstack/react-query": "^5.59.0",
"@testing-library/jest-dom": "^6.5.0", "@testing-library/jest-dom": "^6.5.0",
"@testing-library/react": "^16.0.1", "@testing-library/react": "^16.0.1",
"@types/lodash": "^4.17.7", "@types/lodash": "^4.17.9",
"@types/sanitize-html": "^2.13.0", "@types/sanitize-html": "^2.13.0",
"@uiw/react-codemirror": "^4.23.1", "@uiw/react-codemirror": "^4.23.3",
"clsx": "^2.1.1", "clsx": "^2.1.1",
"dayjs": "^1.11.10", "dayjs": "^1.11.10",
"lodash": "^4.17.21", "lodash": "^4.17.21",
@ -43,7 +44,7 @@
"react-dom": "^18.3.1", "react-dom": "^18.3.1",
"react-infinite-scroll-component": "^6.1.0", "react-infinite-scroll-component": "^6.1.0",
"react-redux": "^9.1.2", "react-redux": "^9.1.2",
"react-router-dom": "^6.26.1", "react-router-dom": "^6.26.2",
"sanitize-html": "^2.13.0", "sanitize-html": "^2.13.0",
"uplot": "^1.6.30", "uplot": "^1.6.30",
"uplot-react": "^1.2.2", "uplot-react": "^1.2.2",
@ -52,21 +53,21 @@
"devDependencies": { "devDependencies": {
"@eslint/compat": "^1.1.1", "@eslint/compat": "^1.1.1",
"@eslint/eslintrc": "^3.1.0", "@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.9.1", "@eslint/js": "^9.11.1",
"@types/react": "^18.3.5", "@types/react": "^18.3.5",
"@types/react-dom": "^18.3.0", "@types/react-dom": "^18.3.0",
"@typescript-eslint/eslint-plugin": "^6.21.0", "@typescript-eslint/eslint-plugin": "^6.21.0",
"@typescript-eslint/parser": "^6.21.0", "@typescript-eslint/parser": "^6.21.0",
"@vitejs/plugin-react": "^4.2.1", "@vitejs/plugin-react": "^4.2.1",
"eslint": "^9.9.1", "eslint": "^9.11.1",
"eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830", "eslint-plugin-react-hooks": "^5.1.0-rc-e56f4ae3-20240830",
"eslint-plugin-react-refresh": "^0.4.11", "eslint-plugin-react-refresh": "^0.4.12",
"globals": "^15.9.0", "globals": "^15.10.0",
"jsdom": "^25.0.0", "jsdom": "^25.0.1",
"postcss": "^8.4.35", "postcss": "^8.4.47",
"postcss-preset-mantine": "^1.17.0", "postcss-preset-mantine": "^1.17.0",
"postcss-simple-vars": "^7.0.1", "postcss-simple-vars": "^7.0.1",
"vite": "^5.1.0", "vite": "^5.4.8",
"vitest": "^2.0.5" "vitest": "^2.1.1"
} }
} }

View file

@ -56,14 +56,15 @@ import TSDBStatusPage from "./pages/TSDBStatusPage";
import FlagsPage from "./pages/FlagsPage"; import FlagsPage from "./pages/FlagsPage";
import ConfigPage from "./pages/ConfigPage"; import ConfigPage from "./pages/ConfigPage";
import AgentPage from "./pages/AgentPage"; import AgentPage from "./pages/AgentPage";
import { Suspense, useEffect } from "react"; import { Suspense } from "react";
import ErrorBoundary from "./components/ErrorBoundary"; import ErrorBoundary from "./components/ErrorBoundary";
import { ThemeSelector } from "./components/ThemeSelector"; import { ThemeSelector } from "./components/ThemeSelector";
import { Notifications } from "@mantine/notifications"; import { Notifications } from "@mantine/notifications";
import { useAppDispatch } from "./state/hooks"; import { useSettings } from "./state/settingsSlice";
import { updateSettings, useSettings } from "./state/settingsSlice";
import SettingsMenu from "./components/SettingsMenu"; import SettingsMenu from "./components/SettingsMenu";
import ReadinessWrapper from "./components/ReadinessWrapper"; import ReadinessWrapper from "./components/ReadinessWrapper";
import NotificationsProvider from "./components/NotificationsProvider";
import NotificationsIcon from "./components/NotificationsIcon";
import { QueryParamProvider } from "use-query-params"; import { QueryParamProvider } from "use-query-params";
import { ReactRouter6Adapter } from "use-query-params/adapters/react-router-6"; import { ReactRouter6Adapter } from "use-query-params/adapters/react-router-6";
import ServiceDiscoveryPage from "./pages/service-discovery/ServiceDiscoveryPage"; import ServiceDiscoveryPage from "./pages/service-discovery/ServiceDiscoveryPage";
@ -170,37 +171,12 @@ const theme = createTheme({
}, },
}); });
// This dynamically/generically determines the pathPrefix by stripping the first known
// endpoint suffix from the window location path. It works out of the box for both direct
// hosting and reverse proxy deployments with no additional configurations required.
const getPathPrefix = (path: string) => {
if (path.endsWith("/")) {
path = path.slice(0, -1);
}
const pagePaths = [
...mainNavPages,
...allStatusPages,
{ path: "/agent" },
].map((p) => p.path);
const pagePath = pagePaths.find((p) => path.endsWith(p));
return path.slice(0, path.length - (pagePath || "").length);
};
const navLinkXPadding = "md"; const navLinkXPadding = "md";
function App() { function App() {
const [opened, { toggle }] = useDisclosure(); const [opened, { toggle }] = useDisclosure();
const pathPrefix = getPathPrefix(window.location.pathname); const { agentMode, consolesLink, pathPrefix } = useSettings();
const dispatch = useAppDispatch();
useEffect(() => {
dispatch(updateSettings({ pathPrefix }));
}, [pathPrefix, dispatch]);
const { agentMode, consolesLink } = useSettings();
const navLinks = ( const navLinks = (
<> <>
@ -314,6 +290,7 @@ function App() {
const navActionIcons = ( const navActionIcons = (
<> <>
<ThemeSelector /> <ThemeSelector />
<NotificationsIcon />
<SettingsMenu /> <SettingsMenu />
<ActionIcon <ActionIcon
component="a" component="a"
@ -347,6 +324,7 @@ function App() {
}} }}
padding="md" padding="md"
> >
<NotificationsProvider>
<AppShell.Header bg="rgb(65, 73, 81)" c="#fff"> <AppShell.Header bg="rgb(65, 73, 81)" c="#fff">
<Group h="100%" px="md" wrap="nowrap"> <Group h="100%" px="md" wrap="nowrap">
<Group <Group
@ -388,6 +366,7 @@ function App() {
{navActionIcons} {navActionIcons}
</Group> </Group>
</AppShell.Navbar> </AppShell.Navbar>
</NotificationsProvider>
<AppShell.Main> <AppShell.Main>
<ErrorBoundary key={location.pathname}> <ErrorBoundary key={location.pathname}>

View file

@ -93,6 +93,7 @@ type QueryOptions = {
path: string; path: string;
params?: Record<string, string>; params?: Record<string, string>;
enabled?: boolean; enabled?: boolean;
refetchInterval?: false | number;
recordResponseTime?: (time: number) => void; recordResponseTime?: (time: number) => void;
}; };
@ -102,6 +103,7 @@ export const useAPIQuery = <T>({
params, params,
enabled, enabled,
recordResponseTime, recordResponseTime,
refetchInterval,
}: QueryOptions) => { }: QueryOptions) => {
const { pathPrefix } = useSettings(); const { pathPrefix } = useSettings();
@ -109,6 +111,7 @@ export const useAPIQuery = <T>({
queryKey: key !== undefined ? key : [path, params], queryKey: key !== undefined ? key : [path, params],
retry: false, retry: false,
refetchOnWindowFocus: false, refetchOnWindowFocus: false,
refetchInterval: refetchInterval,
gcTime: 0, gcTime: 0,
enabled, enabled,
queryFn: createQueryFn({ pathPrefix, path, params, recordResponseTime }), queryFn: createQueryFn({ pathPrefix, path, params, recordResponseTime }),

View file

@ -0,0 +1,8 @@
export interface Notification {
text: string;
date: string;
active: boolean;
modified: boolean;
}
export type NotificationsResult = Notification[];

View file

@ -1,12 +1,12 @@
import { Card, Group } from "@mantine/core"; import { Card, Group } from "@mantine/core";
import { TablerIconsProps } from "@tabler/icons-react"; import { IconProps } from "@tabler/icons-react";
import { FC, ReactNode } from "react"; import { FC, ReactNode } from "react";
import { infoPageCardTitleIconStyle } from "../styles"; import { infoPageCardTitleIconStyle } from "../styles";
const InfoPageCard: FC<{ const InfoPageCard: FC<{
children: ReactNode; children: ReactNode;
title?: string; title?: string;
icon?: React.ComponentType<TablerIconsProps>; icon?: React.ComponentType<IconProps>;
}> = ({ children, title, icon: Icon }) => { }> = ({ children, title, icon: Icon }) => {
return ( return (
<Card shadow="xs" withBorder p="md"> <Card shadow="xs" withBorder p="md">

View file

@@ -0,0 +1,106 @@
+import {
+  ActionIcon,
+  Indicator,
+  Popover,
+  Card,
+  Text,
+  Stack,
+  ScrollArea,
+  Group,
+  rem,
+} from "@mantine/core";
+import {
+  IconAlertTriangle,
+  IconNetworkOff,
+  IconMessageExclamation,
+} from "@tabler/icons-react";
+import { useNotifications } from "../state/useNotifications";
+import { actionIconStyle } from "../styles";
+import { useSettings } from "../state/settingsSlice";
+import { formatTimestamp } from "../lib/formatTime";
+
+const NotificationsIcon = () => {
+  const { notifications, isConnectionError } = useNotifications();
+  const { useLocalTime } = useSettings();
+
+  return notifications.length === 0 && !isConnectionError ? null : (
+    <Indicator
+      color={"red"}
+      size={16}
+      label={isConnectionError ? "!" : notifications.length}
+    >
+      <Popover position="bottom-end" shadow="md" withArrow>
+        <Popover.Target>
+          <ActionIcon
+            color="gray"
+            title="Notifications"
+            aria-label="Notifications"
+            size={32}
+          >
+            <IconMessageExclamation style={actionIconStyle} />
+          </ActionIcon>
+        </Popover.Target>
+        <Popover.Dropdown>
+          <Stack gap="xs">
+            <Text fw={700} size="xs" c="dimmed" ta="center">
+              Notifications
+            </Text>
+            <ScrollArea.Autosize mah={200}>
+              {isConnectionError ? (
+                <Card p="xs" color="red">
+                  <Group wrap="nowrap">
+                    <IconNetworkOff
+                      color="red"
+                      style={{ width: rem(20), height: rem(20) }}
+                    />
+                    <Stack gap="0">
+                      <Text size="sm" fw={500}>
+                        Real-time notifications interrupted.
+                      </Text>
+                      <Text size="xs" c="dimmed">
+                        Please refresh the page or check your connection.
+                      </Text>
+                    </Stack>
+                  </Group>
+                </Card>
+              ) : notifications.length === 0 ? (
+                <Text ta="center" c="dimmed">
+                  No notifications
+                </Text>
+              ) : (
+                notifications.map((notification, index) => (
+                  <Card key={index} p="xs">
+                    <Group wrap="nowrap" align="flex-start">
+                      <IconAlertTriangle
+                        color="red"
+                        style={{
+                          width: rem(20),
+                          height: rem(20),
+                          marginTop: rem(3),
+                        }}
+                      />
+                      <Stack maw={250} gap={5}>
+                        <Text size="sm" fw={500}>
+                          {notification.text}
+                        </Text>
+                        <Text size="xs" c="dimmed">
+                          {formatTimestamp(
+                            new Date(notification.date).valueOf() / 1000,
+                            useLocalTime
+                          )}
+                        </Text>
+                      </Stack>
+                    </Group>
+                  </Card>
+                ))
+              )}
+            </ScrollArea.Autosize>
+          </Stack>
+        </Popover.Dropdown>
+      </Popover>
+    </Indicator>
+  );
+};
+
+export default NotificationsIcon;
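Both this component and NotificationsProvider below import from ../state/useNotifications, which is not among the files shown in this diff. Judging purely from usage — a NotificationsContext whose provider value is { notifications, isConnectionError } and a useNotifications() hook — that module plausibly looks like this sketch (everything beyond those two exports and the value shape is an assumption):

  import { createContext, useContext } from "react";
  import { Notification } from "../api/responseTypes/notifications";

  export interface NotificationsContextValue {
    // assumed value shape, inferred from the provider and consumer shown here
    notifications: Notification[];
    isConnectionError: boolean;
  }

  export const NotificationsContext = createContext<NotificationsContextValue>({
    notifications: [],
    isConnectionError: false,
  });

  export const useNotifications = () => useContext(NotificationsContext);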

View file

@@ -0,0 +1,79 @@
+import React, { useEffect, useState } from 'react';
+import { useSettings } from '../state/settingsSlice';
+import { NotificationsContext } from '../state/useNotifications';
+import { Notification, NotificationsResult } from "../api/responseTypes/notifications";
+import { useAPIQuery } from '../api/api';
+import { fetchEventSource } from '@microsoft/fetch-event-source';
+
+export const NotificationsProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => {
+  const { pathPrefix } = useSettings();
+  const [notifications, setNotifications] = useState<Notification[]>([]);
+  const [isConnectionError, setIsConnectionError] = useState(false);
+  const [shouldFetchFromAPI, setShouldFetchFromAPI] = useState(false);
+
+  const { data, isError } = useAPIQuery<NotificationsResult>({
+    path: '/notifications',
+    enabled: shouldFetchFromAPI,
+    refetchInterval: 10000,
+  });
+
+  useEffect(() => {
+    if (data && data.data) {
+      setNotifications(data.data);
+    }
+    setIsConnectionError(isError);
+  }, [data, isError]);
+
+  useEffect(() => {
+    const controller = new AbortController();
+    fetchEventSource(`${pathPrefix}/api/v1/notifications/live`, {
+      signal: controller.signal,
+      async onopen(response) {
+        if (response.ok) {
+          if (response.status === 200) {
+            setNotifications([]);
+            setIsConnectionError(false);
+          } else if (response.status === 204) {
+            controller.abort();
+            setShouldFetchFromAPI(true);
+          }
+        } else {
+          setIsConnectionError(true);
+          throw new Error(`Unexpected response: ${response.status} ${response.statusText}`);
+        }
+      },
+      onmessage(event) {
+        const notification: Notification = JSON.parse(event.data);
+        setNotifications((prev: Notification[]) => {
+          const updatedNotifications = [...prev.filter((n: Notification) => n.text !== notification.text)];
+
+          if (notification.active) {
+            updatedNotifications.push(notification);
+          }
+
+          return updatedNotifications;
+        });
+      },
+      onclose() {
+        throw new Error("Server closed the connection");
+      },
+      onerror() {
+        setIsConnectionError(true);
+        return 5000;
+      },
+    });
+    return () => {
+      controller.abort();
+    };
+  }, [pathPrefix]);
+
+  return (
+    <NotificationsContext.Provider value={{ notifications, isConnectionError }}>
+      {children}
+    </NotificationsContext.Provider>
+  );
+};
+
+export default NotificationsProvider;
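On the wire, each server-sent event is expected to carry one JSON-encoded Notification in its data field, since onmessage feeds event.data straight into JSON.parse. A hypothetical event from /api/v1/notifications/live:

  data: {"text":"Example notification text.","date":"2024-10-07T09:23:44Z","active":true,"modified":false}

Per the onopen handler, a 200 response starts the stream (active notifications are then maintained incrementally, deduplicated by text and dropped once active turns false), while a 204 aborts the stream and switches the provider to polling /notifications every 10 seconds through useAPIQuery.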

Some files were not shown because too many files have changed in this diff.