Merge branch 'main' into merge-2.51.2-into-main

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Bryan Boreham, 2024-04-10 15:05:52 +01:00, committed by GitHub
commit e1dd8e72df
128 changed files with 5520 additions and 3630 deletions

.github/CODEOWNERS

@@ -1,7 +1,7 @@
 /web/ui @juliusv
 /web/ui/module @juliusv @nexucis
 /storage/remote @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @gouthamve @jesusvazquez
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez
 /discovery/kubernetes @brancz
 /tsdb @jesusvazquez
 /promql @roidelapluie


@@ -6,11 +6,11 @@ updates:
       interval: "monthly"
     groups:
       k8s.io:
         patterns:
           - "k8s.io/*"
       go.opentelemetry.io:
         patterns:
           - "go.opentelemetry.io/*"
   - package-ecosystem: "gomod"
     directory: "/documentation/examples/remote_storage"
     schedule:


@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0


@@ -13,7 +13,7 @@ jobs:
     if: github.repository_owner == 'prometheus'
     steps:
      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0


@@ -45,7 +45,8 @@ jobs:
     steps:
       - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       - run: make build
-      - run: make test GO_ONLY=1
+      # Don't run NPM build; don't run race-detector.
+      - run: make test GO_ONLY=1 test-flags=""

   test_ui:
     name: UI tests
@@ -157,11 +158,11 @@ jobs:
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+        uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.55.2
+          version: v1.56.2
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -171,7 +172,7 @@ jobs:
   publish_main:
     name: Publish main branch artifacts
     runs-on: ubuntu-latest
-    needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+    needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
       - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
@@ -185,7 +186,7 @@ jobs:
   publish_release:
     name: Publish release artefacts
     runs-on: ubuntu-latest
-    needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+    needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
     steps:
       - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1


@@ -0,0 +1,52 @@
+---
+name: Push README to Docker Hub
+on:
+  push:
+    paths:
+      - "README.md"
+      - ".github/workflows/container_description.yml"
+    branches: [ main, master ]
+
+permissions:
+  contents: read
+
+jobs:
+  PushDockerHubReadme:
+    runs-on: ubuntu-latest
+    name: Push README to Docker Hub
+    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+    steps:
+      - name: git checkout
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: Set docker hub repo name
+        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
+      - name: Push README to Dockerhub
+        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
+        env:
+          DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
+          DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
+        with:
+          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
+          provider: dockerhub
+          short_description: ${{ env.DOCKER_REPO_NAME }}
+          readme_file: 'README.md'
+
+  PushQuayIoReadme:
+    runs-on: ubuntu-latest
+    name: Push README to quay.io
+    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+    steps:
+      - name: git checkout
+        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - name: Set quay.io org name
+        run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
+      - name: Set quay.io repo name
+        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
+      - name: Push README to quay.io
+        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
+        env:
+          DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
+        with:
+          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
+          provider: quay
+          readme_file: 'README.md'


@@ -26,6 +26,7 @@ linters:
   - testifylint
   - unconvert
   - unused
+  - usestdlibvars

 issues:
   max-same-issues: 0
@@ -48,24 +49,24 @@ linters-settings:
     rules:
       main:
         deny:
          - pkg: "sync/atomic"
            desc: "Use go.uber.org/atomic instead of sync/atomic"
          - pkg: "github.com/stretchr/testify/assert"
            desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
          - pkg: "github.com/go-kit/kit/log"
            desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
          - pkg: "io/ioutil"
            desc: "Use corresponding 'os' or 'io' functions instead."
          - pkg: "regexp"
            desc: "Use github.com/grafana/regexp instead of regexp"
          - pkg: "github.com/pkg/errors"
            desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
          - pkg: "gzip"
            desc: "Use github.com/klauspost/compress instead of gzip"
          - pkg: "zlib"
            desc: "Use github.com/klauspost/compress instead of zlib"
          - pkg: "golang.org/x/exp/slices"
            desc: "Use 'slices' instead."
   errcheck:
     exclude-functions:
       # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
@@ -135,4 +136,3 @@ linters-settings:
       - require-error
       - suite-dont-use-pkg
       - suite-extra-assert-call
-
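The new `usestdlibvars` linter flags string literals that duplicate constants already defined in the standard library; the `http.NewRequest("GET", ...)` to `http.NewRequest(http.MethodGet, ...)` rewrites further down in this commit are exactly the pattern it enforces. A minimal before/after sketch:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Flagged by usestdlibvars: the literal "GET" duplicates a stdlib constant.
	reqOld, _ := http.NewRequest("GET", "http://example.com", nil)

	// Preferred: the named constant is typo-proof and easy to grep for.
	reqNew, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)

	fmt.Println(reqOld.Method == reqNew.Method) // true
}
```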


@ -1,5 +1,7 @@
--- ---
extends: default extends: default
ignore: |
ui/react-app/node_modules
rules: rules:
braces: braces:


@@ -1,5 +1,9 @@
 # Changelog

+## unreleased
+
+* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
+
 ## 2.51.2 / 2024-04-09

 Bugfix release.


@@ -90,7 +90,7 @@ can modify the `./promql/parser/generated_parser.y.go` manually.
 ```golang
 // As of writing this was somewhere around line 600.
 var (
-	yyDebug        = 0 // This can be be a number 0 -> 5.
+	yyDebug        = 0 // This can be a number 0 -> 5.
 	yyErrorVerbose = false // This can be set to true.
 )


@ -1,7 +1,12 @@
# Maintainers # Maintainers
Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers: General maintainers:
* Bryan Boreham (bjboreham@gmail.com / @bboreham)
* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
Maintainers for specific parts of the codebase:
* `cmd` * `cmd`
* `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl) * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
* `discovery` * `discovery`
@ -12,6 +17,7 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama) George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
* `storage` * `storage`
* `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie) * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
* `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez) * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto) * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
* `web` * `web`
@ -19,7 +25,6 @@ George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
* `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis) * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
* `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ) * `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)
For the sake of brevity, not all subtrees are explicitly listed. Due to the For the sake of brevity, not all subtrees are explicitly listed. Due to the
size of this repository, the natural changes in focus of maintainers over time, size of this repository, the natural changes in focus of maintainers over time,
and nuances of where particular features live, this list will always be and nuances of where particular features live, this list will always be


@@ -82,7 +82,7 @@ assets-tarball: assets
 .PHONY: parser
 parser:
 	@echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell command -v goyacc > /dev/null))
+ifeq (, $(shell command -v goyacc 2> /dev/null))
 	@echo "goyacc not installed so skipping"
 	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
 else
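The redirect change is subtle but real: `$(shell ...)` captures stdout, so with `command -v goyacc > /dev/null` the captured text was always empty and `ifeq (, ...)` always took the "not installed" branch, even with goyacc on the PATH. Redirecting only stderr (`2> /dev/null`) lets the resolved path reach Make while still silencing lookup noise. The same fix is applied to the `gotestsum` and `yamllint` checks in Makefile.common below.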


@@ -49,7 +49,7 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
 	GOTEST_DIR := test-results
 	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.55.2
+GOLANGCI_LINT_VERSION ?= v1.56.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -182,7 +182,7 @@ endif
 .PHONY: common-yamllint
 common-yamllint:
 	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
 	@echo "yamllint not installed so skipping"
 else
 	yamllint .
@@ -208,6 +208,10 @@ common-tarball: promu
 	@echo ">> building release tarball"
 	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:


@@ -56,6 +56,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka) |
 | v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
 | v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
+| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
+| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.


@@ -447,7 +447,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)

 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -960,8 +960,8 @@ func main() {
 		func() error {
 			// Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
 			select {
-			case <-term:
-				level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
+			case sig := <-term:
+				level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String())
 				reloadReady.Close()
 			case <-webHandler.Quit():
 				level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
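Binding the received value (`case sig := <-term`) lets the shutdown log name the signal that actually arrived instead of hard-coding SIGTERM, which matters because such channels are usually registered for more than one signal. A minimal sketch of the pattern, assuming a standalone program rather than Prometheus's full wiring:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// The channel typically receives several signal types, so the handler
	// cannot know in advance which one fired.
	term := make(chan os.Signal, 1)
	signal.Notify(term, os.Interrupt, syscall.SIGTERM)

	sig := <-term // binding the value preserves which signal arrived
	fmt.Printf("received an OS signal, exiting gracefully: %s\n", sig.String())
}
```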


@@ -482,7 +482,7 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
 		return err
 	}

-	request, err := http.NewRequest("GET", config.Address, nil)
+	request, err := http.NewRequest(http.MethodGet, config.Address, nil)
 	if err != nil {
 		return err
 	}


@@ -234,17 +234,3 @@ func (m *multipleAppender) flushAndCommit(ctx context.Context) error {
 	}
 	return nil
 }
-
-func max(x, y int64) int64 {
-	if x > y {
-		return x
-	}
-	return y
-}
-
-func min(x, y int64) int64 {
-	if x < y {
-		return x
-	}
-	return y
-}
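These helpers are deletable because Go 1.21 promoted `min` and `max` to predeclared built-ins over ordered types, so the hand-rolled `int64` versions now merely shadow the builtins. A quick illustration:

```go
package main

import "fmt"

func main() {
	var x, y int64 = 3, 7
	// Since Go 1.21, min and max are built-ins for any ordered type;
	// no per-type helper functions are needed.
	fmt.Println(min(x, y), max(x, y)) // 3 7
}
```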


@@ -12,4 +12,4 @@ tests:
     eval_time: 1m
     exp_samples:
       - value: 1
         labels: test


@@ -33,6 +33,7 @@ import (
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
+	"go.uber.org/atomic"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
@@ -149,8 +150,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 }

 func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
-	var mu sync.Mutex
-	var total uint64
+	var total atomic.Uint64

 	for i := 0; i < scrapeCount; i += 100 {
 		var wg sync.WaitGroup
@@ -165,22 +165,21 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
 			wg.Add(1)
 			go func() {
+				defer wg.Done()
 				n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
 				if err != nil {
 					// exitWithError(err)
 					fmt.Println(" err", err)
 				}
-				mu.Lock()
-				total += n
-				mu.Unlock()
-				wg.Done()
+				total.Add(n)
 			}()
 		}
 		wg.Wait()
 	}

 	fmt.Println("ingestion completed")

-	return total, nil
+	return total.Load(), nil
 }

 func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
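Replacing the `sync.Mutex` plus plain `uint64` pair with `go.uber.org/atomic`'s `Uint64` shrinks the critical section to a single lock-free add and removes any chance of a forgotten unlock; moving to `defer wg.Done()` also guarantees the WaitGroup is released even if the shard ingest panics. A condensed sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	var total atomic.Uint64 // lock-free counter, safe for concurrent use
	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(n uint64) {
			defer wg.Done() // runs even if the work below panics
			total.Add(n)
		}(uint64(i))
	}
	wg.Wait()
	fmt.Println(total.Load()) // 45
}
```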


@@ -175,13 +175,18 @@ type testGroup struct {
 }

 // test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) []error {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
 	// Setup testing suite.
-	suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
+	suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
 	if err != nil {
 		return []error{err}
 	}
-	defer suite.Close()
+	defer func() {
+		err := suite.Close()
+		if err != nil {
+			outErr = append(outErr, err)
+		}
+	}()
 	suite.SubqueryInterval = evalInterval

 	// Load the rule files.
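The switch to a named return value (`outErr []error`) is what lets the deferred closure surface `suite.Close()` failures: a plain `defer suite.Close()` discards the error, while a deferred function can still append to the named result after the body has returned. A minimal sketch of the idiom, with a hypothetical `closer` standing in for the lazy loader:

```go
package main

import (
	"errors"
	"fmt"
)

type closer struct{} // hypothetical stand-in for promql's lazy loader

func (closer) Close() error { return errors.New("close failed") }

// run uses a named return so the deferred Close error is not lost.
func run() (outErr []error) {
	c := closer{}
	defer func() {
		if err := c.Close(); err != nil {
			outErr = append(outErr, err) // mutates the named result
		}
	}()
	return nil // the deferred append still lands in outErr
}

func main() { fmt.Println(run()) } // [close failed]
```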


@@ -1840,7 +1840,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "azure_authentication_method.bad.yml",
-		errMsg:   "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
+		errMsg:   "unknown authentication_type \"invalid\". Supported types are \"OAuth\", \"ManagedIdentity\" or \"SDK\"",
 	},
 	{
 		filename: "azure_bearertoken_basicauth.bad.yml",


@@ -65,6 +65,7 @@ const (
 	azureLabelMachineSize = azureLabel + "machine_size"

 	authMethodOAuth           = "OAuth"
+	authMethodSDK             = "SDK"
 	authMethodManagedIdentity = "ManagedIdentity"
 )
@@ -164,8 +165,8 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		}
 	}

-	if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity {
-		return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity)
+	if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity && c.AuthenticationMethod != authMethodSDK {
+		return fmt.Errorf("unknown authentication_type %q. Supported types are %q, %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity, authMethodSDK)
 	}

 	return c.HTTPClientConfig.Validate()
@@ -212,6 +213,14 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.Discoverer
 	return d, nil
 }

+type client interface {
+	getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error)
+	getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error)
+	getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error)
+	getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error)
+	getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error)
+}
+
 // azureClient represents multiple Azure Resource Manager providers.
 type azureClient struct {
 	nic *armnetwork.InterfacesClient
@@ -221,14 +230,17 @@ type azureClient struct {
 	logger log.Logger
 }

+var _ client = &azureClient{}
+
 // createAzureClient is a helper function for creating an Azure compute client to ARM.
-func createAzureClient(cfg SDConfig) (azureClient, error) {
+func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
 	cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

 	var c azureClient
+	c.logger = logger

 	telemetry := policy.TelemetryOptions{
 		ApplicationID: userAgent,
@@ -239,12 +251,12 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
 		Telemetry: telemetry,
 	})
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

 	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

 	options := &arm.ClientOptions{
 		ClientOptions: policy.ClientOptions{
@@ -256,25 +268,25 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {

 	c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

 	c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

 	c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

 	c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
 	if err != nil {
-		return azureClient{}, err
+		return &azureClient{}, err
 	}

-	return c, nil
+	return &c, nil
 }

 func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
@@ -294,6 +306,16 @@ func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azco
 			return nil, err
 		}
 		credential = azcore.TokenCredential(secretCredential)
+	case authMethodSDK:
+		options := &azidentity.DefaultAzureCredentialOptions{ClientOptions: policyClientOptions}
+		if len(cfg.TenantID) != 0 {
+			options.TenantID = cfg.TenantID
+		}
+		sdkCredential, err := azidentity.NewDefaultAzureCredential(options)
+		if err != nil {
+			return nil, err
+		}
+		credential = azcore.TokenCredential(sdkCredential)
 	}
 	return credential, nil
 }
@@ -330,12 +352,11 @@ func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, erro
 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	defer level.Debug(d.logger).Log("msg", "Azure discovery completed")

-	client, err := createAzureClient(*d.cfg)
+	client, err := createAzureClient(*d.cfg, d.logger)
 	if err != nil {
 		d.metrics.failuresCount.Inc()
 		return nil, fmt.Errorf("could not create Azure client: %w", err)
 	}
-	client.logger = d.logger

 	machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
 	if err != nil {
@@ -374,96 +395,8 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	for _, vm := range machines {
 		go func(vm virtualMachine) {
 			defer wg.Done()
-			r, err := newAzureResourceFromID(vm.ID, d.logger)
-			if err != nil {
-				ch <- target{labelSet: nil, err: err}
-				return
-			}
-
-			labels := model.LabelSet{
-				azureLabelSubscriptionID:       model.LabelValue(d.cfg.SubscriptionID),
-				azureLabelTenantID:             model.LabelValue(d.cfg.TenantID),
-				azureLabelMachineID:            model.LabelValue(vm.ID),
-				azureLabelMachineName:          model.LabelValue(vm.Name),
-				azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
-				azureLabelMachineOSType:        model.LabelValue(vm.OsType),
-				azureLabelMachineLocation:      model.LabelValue(vm.Location),
-				azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
-				azureLabelMachineSize:          model.LabelValue(vm.Size),
-			}
-
-			if vm.ScaleSet != "" {
-				labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
-			}
-
-			for k, v := range vm.Tags {
-				name := strutil.SanitizeLabelName(k)
-				labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
-			}
-
-			// Get the IP address information via separate call to the network provider.
-			for _, nicID := range vm.NetworkInterfaces {
-				var networkInterface *armnetwork.Interface
-				if v, ok := d.getFromCache(nicID); ok {
-					networkInterface = v
-					d.metrics.cacheHitCount.Add(1)
-				} else {
-					if vm.ScaleSet == "" {
-						networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
-					} else {
-						networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
-					}
-					if err != nil {
-						if errors.Is(err, errorNotFound) {
-							level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
-						} else {
-							ch <- target{labelSet: nil, err: err}
-						}
-						// Get out of this routine because we cannot continue without a network interface.
-						return
-					}
-
-					// Continue processing with the network interface
-					d.addToCache(nicID, networkInterface)
-				}
-
-				if networkInterface.Properties == nil {
-					continue
-				}
-
-				// Unfortunately Azure does not return information on whether a VM is deallocated.
-				// This information is available via another API call however the Go SDK does not
-				// yet support this. On deallocated machines, this value happens to be nil so it
-				// is a cheap and easy way to determine if a machine is allocated or not.
-				if networkInterface.Properties.Primary == nil {
-					level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
-					return
-				}
-
-				if *networkInterface.Properties.Primary {
-					for _, ip := range networkInterface.Properties.IPConfigurations {
-						// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
-						// therefore we need to validate that both are not nil.
-						if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
-							labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
-						}
-						if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
-							labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-							address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
-							labels[model.AddressLabel] = model.LabelValue(address)
-							ch <- target{labelSet: labels, err: nil}
-							return
-						}
-						// If we made it here, we don't have a private IP which should be impossible.
-						// Return an empty target and error to ensure an all or nothing situation.
-						err = fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
-						ch <- target{labelSet: nil, err: err}
-						return
-					}
-				}
-			}
+			labelSet, err := d.vmToLabelSet(ctx, client, vm)
+			ch <- target{labelSet: labelSet, err: err}
 		}(vm)
 	}
@@ -484,6 +417,95 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	return []*targetgroup.Group{&tg}, nil
 }

+func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
+	r, err := newAzureResourceFromID(vm.ID, d.logger)
+	if err != nil {
+		return nil, err
+	}
+
+	labels := model.LabelSet{
+		azureLabelSubscriptionID:       model.LabelValue(d.cfg.SubscriptionID),
+		azureLabelTenantID:             model.LabelValue(d.cfg.TenantID),
+		azureLabelMachineID:            model.LabelValue(vm.ID),
+		azureLabelMachineName:          model.LabelValue(vm.Name),
+		azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
+		azureLabelMachineOSType:        model.LabelValue(vm.OsType),
+		azureLabelMachineLocation:      model.LabelValue(vm.Location),
+		azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
+		azureLabelMachineSize:          model.LabelValue(vm.Size),
+	}
+
+	if vm.ScaleSet != "" {
+		labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
+	}
+
+	for k, v := range vm.Tags {
+		name := strutil.SanitizeLabelName(k)
+		labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
+	}
+
+	// Get the IP address information via separate call to the network provider.
+	for _, nicID := range vm.NetworkInterfaces {
+		var networkInterface *armnetwork.Interface
+		if v, ok := d.getFromCache(nicID); ok {
+			networkInterface = v
+			d.metrics.cacheHitCount.Add(1)
+		} else {
+			if vm.ScaleSet == "" {
+				networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
+			} else {
+				networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
+			}
+			if err != nil {
+				if errors.Is(err, errorNotFound) {
+					level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
+				} else {
+					return nil, err
+				}
+				// Get out of this routine because we cannot continue without a network interface.
+				return nil, nil
+			}
+
+			// Continue processing with the network interface
+			d.addToCache(nicID, networkInterface)
+		}
+
+		if networkInterface.Properties == nil {
+			continue
+		}
+
+		// Unfortunately Azure does not return information on whether a VM is deallocated.
+		// This information is available via another API call however the Go SDK does not
+		// yet support this. On deallocated machines, this value happens to be nil so it
+		// is a cheap and easy way to determine if a machine is allocated or not.
+		if networkInterface.Properties.Primary == nil {
+			level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
+			return nil, nil
+		}
+
+		if *networkInterface.Properties.Primary {
+			for _, ip := range networkInterface.Properties.IPConfigurations {
+				// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
+				// therefore we need to validate that both are not nil.
+				if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
+					labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
+				}
+				if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
+					labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
+					address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
+					labels[model.AddressLabel] = model.LabelValue(address)
+					return labels, nil
+				}
+				// If we made it here, we don't have a private IP which should be impossible.
+				// Return an empty target and error to ensure an all or nothing situation.
+				return nil, fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
+			}
+		}
+	}
+	// TODO: Should we say something at this point?
+	return nil, nil
+}
+
 func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
 	var vms []virtualMachine
 	if len(resourceGroup) == 0 {


@@ -14,16 +14,24 @@
 package azure

 import (
+	"context"
+	"fmt"
 	"testing"

 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+	cache "github.com/Code-Hex/go-generics-cache"
+	"github.com/Code-Hex/go-generics-cache/policy/lru"
+	"github.com/go-kit/log"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )

 func TestMain(m *testing.M) {
-	goleak.VerifyTestMain(m)
+	goleak.VerifyTestMain(m,
+		goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),
+	)
 }

 func TestMapFromVMWithEmptyTags(t *testing.T) {
@@ -79,6 +87,91 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
 	require.Equal(t, expectedVM, actualVM)
 }

+func TestVMToLabelSet(t *testing.T) {
+	id := "/subscriptions/00000000-0000-0000-0000-000000000000/test"
+	name := "name"
+	size := "size"
+	vmSize := armcompute.VirtualMachineSizeTypes(size)
+	osType := armcompute.OperatingSystemTypesLinux
+	vmType := "type"
+	location := "westeurope"
+	computerName := "computer_name"
+	networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1"
+	ipAddress := "10.20.30.40"
+	primary := true
+	networkProfile := armcompute.NetworkProfile{
+		NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
+			{
+				ID:         &networkID,
+				Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
+			},
+		},
+	}
+	properties := &armcompute.VirtualMachineProperties{
+		OSProfile: &armcompute.OSProfile{
+			ComputerName: &computerName,
+		},
+		StorageProfile: &armcompute.StorageProfile{
+			OSDisk: &armcompute.OSDisk{
+				OSType: &osType,
+			},
+		},
+		NetworkProfile: &networkProfile,
+		HardwareProfile: &armcompute.HardwareProfile{
+			VMSize: &vmSize,
+		},
+	}
+
+	testVM := armcompute.VirtualMachine{
+		ID:         &id,
+		Name:       &name,
+		Type:       &vmType,
+		Location:   &location,
+		Tags:       nil,
+		Properties: properties,
+	}
+
+	expectedVM := virtualMachine{
+		ID:                id,
+		Name:              name,
+		ComputerName:      computerName,
+		Type:              vmType,
+		Location:          location,
+		OsType:            "Linux",
+		Tags:              map[string]*string{},
+		NetworkInterfaces: []string{networkID},
+		Size:              size,
+	}
+
+	actualVM := mapFromVM(testVM)
+	require.Equal(t, expectedVM, actualVM)
+
+	cfg := DefaultSDConfig
+	d := &Discovery{
+		cfg:    &cfg,
+		logger: log.NewNopLogger(),
+		cache:  cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
+	}
+	network := armnetwork.Interface{
+		Name: &networkID,
+		Properties: &armnetwork.InterfacePropertiesFormat{
+			Primary: &primary,
+			IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
+				{Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
+					PrivateIPAddress: &ipAddress,
+				}},
+			},
+		},
+	}
+	client := &mockAzureClient{
+		networkInterface: &network,
+	}
+	labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
+	require.NoError(t, err)
+	require.Len(t, labelSet, 11)
+}
+
 func TestMapFromVMWithEmptyOSType(t *testing.T) {
 	id := "test"
 	name := "name"
@@ -381,3 +474,35 @@ func TestNewAzureResourceFromID(t *testing.T) {
 		require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
 	}
 }
+
+type mockAzureClient struct {
+	networkInterface *armnetwork.Interface
+}
+
+var _ client = &mockAzureClient{}
+
+func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
+	return nil, nil
+}
+
+func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
+	return nil, nil
+}
+
+func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
+	return nil, nil
+}
+
+func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
+	if networkInterfaceID == "" {
+		return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty")
+	}
+	return m.networkInterface, nil
+}
+
+func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
+	if scaleSetName == "" {
+		return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty")
+	}
+	return m.networkInterface, nil
+}
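Both the real and mock clients carry a `var _ client = &...{}` guard, a compile-time assertion that the type satisfies the interface; if a method signature drifts, the build breaks at the guard rather than at some distant call site. The idiom in isolation:

```go
package main

type greeter interface{ Greet() string }

type englishGreeter struct{}

func (englishGreeter) Greet() string { return "hello" }

// Compile-time guard: the build fails here if englishGreeter ever stops
// satisfying greeter; nothing is allocated or run at runtime.
var _ greeter = englishGreeter{}

func main() {}
```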


@@ -81,7 +81,7 @@ const appListPath string = "/apps"
 func fetchApps(ctx context.Context, server string, client *http.Client) (*Applications, error) {
 	url := fmt.Sprintf("%s%s", server, appListPath)

-	request, err := http.NewRequest("GET", url, nil)
+	request, err := http.NewRequest(http.MethodGet, url, nil)
 	if err != nil {
 		return nil, err
 	}


@@ -208,7 +208,6 @@ func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group)
 	select {
 	case <-timeout:
 		t.Fatalf("Expected update but got none")
-		return
 	case <-time.After(defaultWait / 10):
 		if ref.Equal(t.lastReceive()) {
 			// No update received.


@@ -70,7 +70,7 @@ func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
 }

 func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
-	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
+	req, err := http.NewRequest(http.MethodGet, d.endpoint+"/server", nil)
 	if err != nil {
 		return nil, err
 	}


@@ -150,7 +150,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPCli
 }

 func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
-	req, err := http.NewRequest("GET", d.url, nil)
+	req, err := http.NewRequest(http.MethodGet, d.url, nil)
 	if err != nil {
 		return nil, err
 	}


@@ -311,7 +311,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di
 		}
 	case conf.APIServer.URL == nil:
 		// Use the Kubernetes provided pod service account
-		// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
+		// as described in https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#using-official-client-libraries
 		kcfg, err = rest.InClusterConfig()
 		if err != nil {
 			return nil, err
@@ -485,8 +485,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			eps := NewEndpointSlice(
 				log.With(d.logger, "role", "endpointslice"),
 				informer,
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
-				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
+				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
 				d.metrics.eventCount,
 			)
@@ -545,8 +545,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			eps := NewEndpoints(
 				log.With(d.logger, "role", "endpoint"),
 				d.newEndpointsByNodeInformer(elw),
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
-				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
+				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
 				d.metrics.eventCount,
 			)
@@ -602,7 +602,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			svc := NewService(
 				log.With(d.logger, "role", "service"),
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, svc)
@@ -641,7 +641,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 					return i.Watch(ctx, options)
 				},
 			}
-			informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
+			informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
 		} else {
 			i := d.client.NetworkingV1beta1().Ingresses(namespace)
 			ilw := &cache.ListWatch{
@@ -656,7 +656,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 					return i.Watch(ctx, options)
 				},
 			}
-			informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
+			informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
 		}
 		ingress := NewIngress(
 			log.With(d.logger, "role", "ingress"),
@@ -747,7 +747,7 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
 			return d.client.CoreV1().Nodes().Watch(ctx, options)
 		},
 	}
-	return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
+	return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
 }

 func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
@@ -762,7 +762,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
 		}
 	}

-	return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
+	return d.mustNewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
 }

 func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
@@ -783,7 +783,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
 		return pods, nil
 	}
 	if !d.attachMetadata.Node {
-		return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
+		return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 	}

 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
@@ -809,13 +809,13 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
 		return nodes, nil
 	}

-	return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
+	return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 }

 func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
 	indexers := make(map[string]cache.IndexFunc)
 	if !d.attachMetadata.Node {
-		return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers)
+		return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers)
 	}

 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
@@ -854,7 +854,32 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
 		return nodes, nil
 	}

-	return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers)
+	return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers)
+}
+
+func (d *Discovery) informerWatchErrorHandler(r *cache.Reflector, err error) {
+	d.metrics.failuresCount.Inc()
+	cache.DefaultWatchErrorHandler(r, err)
+}
+
+func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer {
+	informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod)
+	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
+	// Such a scenario would suggest an incorrect use of the API, thus the panic.
+	if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil {
+		panic(err)
+	}
+	return informer
+}
+
+func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers)
+	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
+	// Such a scenario would suggest an incorrect use of the API, thus the panic.
+	if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil {
+		panic(err)
+	}
+	return informer
 }

 func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {


@@ -21,12 +21,16 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/stretchr/testify/require"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/apimachinery/pkg/watch"
 	fakediscovery "k8s.io/client-go/discovery/fake"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
+	kubetesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"

 	"github.com/prometheus/client_golang/prometheus"
@@ -314,3 +318,39 @@ func TestCheckNetworkingV1Supported(t *testing.T) {
 		})
 	}
 }
+
+func TestFailuresCountMetric(t *testing.T) {
+	tests := []struct {
+		role             Role
+		minFailedWatches int
+	}{
+		{RoleNode, 1},
+		{RolePod, 1},
+		{RoleService, 1},
+		{RoleEndpoint, 3},
+		{RoleEndpointSlice, 3},
+		{RoleIngress, 1},
+	}
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(string(tc.role), func(t *testing.T) {
+			t.Parallel()
+
+			n, c := makeDiscovery(tc.role, NamespaceDiscovery{})
+			// The counter is initialized and no failures at the beginning.
+			require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount))
+
+			// Simulate an error on watch requests.
+			c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) {
+				return true, nil, apierrors.NewUnauthorized("unauthorized")
+			})
+
+			// Start the discovery.
+			k8sDiscoveryTest{discovery: n}.Run(t)
+
+			// At least the errors of the initial watches should be caught (watches are retried on errors).
+			require.GreaterOrEqual(t, prom_testutil.ToFloat64(n.metrics.failuresCount), float64(tc.minFailedWatches))
+		})
+	}
+}


@@ -22,7 +22,8 @@ import (
 var _ discovery.DiscovererMetrics = (*kubernetesMetrics)(nil)

 type kubernetesMetrics struct {
 	eventCount    *prometheus.CounterVec
+	failuresCount prometheus.Counter

 	metricRegisterer discovery.MetricRegisterer
 }
@@ -37,10 +38,18 @@ func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetric
 			},
 			[]string{"role", "event"},
 		),
+		failuresCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Namespace: discovery.KubernetesMetricsNamespace,
+				Name:      "failures_total",
+				Help:      "The number of failed WATCH/LIST requests.",
+			},
+		),
 	}

 	m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
 		m.eventCount,
+		m.failuresCount,
 	})

 	// Initialize metric vectors.
@@ -61,6 +70,8 @@ func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetric
 		}
 	}

+	m.failuresCount.Add(0)
+
 	return m
 }
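The `m.failuresCount.Add(0)` mirrors the vector initialization above it: it touches the counter so a 0 sample is guaranteed from startup, which is what lets the new test assert the metric starts at zero before any failure. The need is most acute for `CounterVec`, where a label combination is only exported after it is first touched. A sketch of that behavior, assuming standard client_golang semantics:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	events := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "events_total"},
		[]string{"role"},
	)
	// Before initialization, the "node" series does not exist at all.
	// Touching it with Add(0) exports it immediately at value 0.
	events.WithLabelValues("node").Add(0)
	fmt.Println(testutil.ToFloat64(events.WithLabelValues("node"))) // 0
}
```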


@@ -733,7 +733,6 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 	t.Helper()
 	if _, ok := tSets[poolKey]; !ok {
 		t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
-		return
 	}

 	match := false
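Dropping the `return` after `t.Fatalf` (here and in the file-discovery test above) is safe because `Fatalf` already ends the test: it marks it failed and stops the test goroutine via `runtime.Goexit`, so a trailing `return` is dead code of the kind linters flag as unreachable.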


@ -59,17 +59,22 @@ const (
linodeLabelSpecsVCPUs = linodeLabel + "specs_vcpus" linodeLabelSpecsVCPUs = linodeLabel + "specs_vcpus"
linodeLabelSpecsTransferBytes = linodeLabel + "specs_transfer_bytes" linodeLabelSpecsTransferBytes = linodeLabel + "specs_transfer_bytes"
linodeLabelExtraIPs = linodeLabel + "extra_ips" linodeLabelExtraIPs = linodeLabel + "extra_ips"
linodeLabelIPv6Ranges = linodeLabel + "ipv6_ranges"
// This is our events filter; when polling for changes, we care only about // This is our events filter; when polling for changes, we care only about
// events since our last refresh. // events since our last refresh.
// Docs: https://www.linode.com/docs/api/account/#events-list // Docs: https://www.linode.com/docs/api/account/#events-list.
filterTemplate = `{"created": {"+gte": "%s"}}` filterTemplate = `{"created": {"+gte": "%s"}}`
// Optional region filtering.
regionFilterTemplate = `{"region": "%s"}`
) )
// DefaultSDConfig is the default Linode SD configuration. // DefaultSDConfig is the default Linode SD configuration.
var DefaultSDConfig = SDConfig{ var DefaultSDConfig = SDConfig{
TagSeparator: ",", TagSeparator: ",",
Port: 80, Port: 80,
Region: "",
RefreshInterval: model.Duration(60 * time.Second), RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
} }
@ -85,6 +90,7 @@ type SDConfig struct {
RefreshInterval model.Duration `yaml:"refresh_interval"` RefreshInterval model.Duration `yaml:"refresh_interval"`
Port int `yaml:"port"` Port int `yaml:"port"`
TagSeparator string `yaml:"tag_separator,omitempty"` TagSeparator string `yaml:"tag_separator,omitempty"`
Region string `yaml:"region,omitempty"`
} }
// NewDiscovererMetrics implements discovery.Config. // NewDiscovererMetrics implements discovery.Config.
@ -122,6 +128,7 @@ type Discovery struct {
*refresh.Discovery *refresh.Discovery
client *linodego.Client client *linodego.Client
port int port int
region string
tagSeparator string tagSeparator string
lastRefreshTimestamp time.Time lastRefreshTimestamp time.Time
pollCount int pollCount int
@ -139,6 +146,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
d := &Discovery{ d := &Discovery{
port: conf.Port, port: conf.Port,
region: conf.Region,
tagSeparator: conf.TagSeparator, tagSeparator: conf.TagSeparator,
pollCount: 0, pollCount: 0,
lastRefreshTimestamp: time.Now().UTC(), lastRefreshTimestamp: time.Now().UTC(),
@ -224,16 +232,31 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
tg := &targetgroup.Group{ tg := &targetgroup.Group{
Source: "Linode", Source: "Linode",
} }
opts := linodego.ListOptions{
PageSize: 500,
}
// If region filter provided, use it to constrain results.
if d.region != "" {
opts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
}
// Gather all linode instances. // Gather all linode instances.
instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500}) instances, err := d.client.ListInstances(ctx, &opts)
if err != nil { if err != nil {
d.metrics.failuresCount.Inc() d.metrics.failuresCount.Inc()
return nil, err return nil, err
} }
// Gather detailed IP address info for all IPs on all linode instances. // Gather detailed IP address info for all IPs on all linode instances.
detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500}) detailedIPs, err := d.client.ListIPAddresses(ctx, &opts)
if err != nil {
d.metrics.failuresCount.Inc()
return nil, err
}
// Gather detailed IPv6 Range info for all linode instances.
ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &opts)
if err != nil { if err != nil {
d.metrics.failuresCount.Inc() d.metrics.failuresCount.Inc()
return nil, err return nil, err
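
Editor's note: the filter built above rides the Linode API's X-Filter mechanism — linodego serializes ListOptions.Filter into that request header. A hedged sketch of the pattern, with client wiring omitted (function and variable names here are illustrative):

package linodesketch

import (
	"context"
	"fmt"

	"github.com/linode/linodego"
)

// listInstancesInRegion mirrors the refreshData change above: one shared
// ListOptions value, optionally constrained to a region.
func listInstancesInRegion(ctx context.Context, client *linodego.Client, region string) ([]linodego.Instance, error) {
	opts := linodego.ListOptions{PageSize: 500}
	if region != "" {
		// Sent to the API as the X-Filter header.
		opts.Filter = fmt.Sprintf(`{"region": "%s"}`, region)
	}
	return client.ListInstances(ctx, &opts)
}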
@ -248,7 +271,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
privateIPv4, publicIPv4, publicIPv6 string privateIPv4, publicIPv4, publicIPv6 string
privateIPv4RDNS, publicIPv4RDNS, publicIPv6RDNS string privateIPv4RDNS, publicIPv4RDNS, publicIPv6RDNS string
backupsStatus string backupsStatus string
extraIPs []string extraIPs, ipv6Ranges []string
) )
for _, ip := range instance.IPv4 { for _, ip := range instance.IPv4 {
@ -276,17 +299,23 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
} }
if instance.IPv6 != "" { if instance.IPv6 != "" {
slaac := strings.Split(instance.IPv6, "/")[0]
for _, detailedIP := range detailedIPs { for _, detailedIP := range detailedIPs {
if detailedIP.Address != strings.Split(instance.IPv6, "/")[0] { if detailedIP.Address != slaac {
continue continue
} }
publicIPv6 = detailedIP.Address publicIPv6 = detailedIP.Address
if detailedIP.RDNS != "" && detailedIP.RDNS != "null" { if detailedIP.RDNS != "" && detailedIP.RDNS != "null" {
publicIPv6RDNS = detailedIP.RDNS publicIPv6RDNS = detailedIP.RDNS
} }
} }
for _, ipv6Range := range ipv6RangeList {
if ipv6Range.RouteTarget != slaac {
continue
}
ipv6Ranges = append(ipv6Ranges, fmt.Sprintf("%s/%d", ipv6Range.Range, ipv6Range.Prefix))
}
} }
if instance.Backups.Enabled { if instance.Backups.Enabled {
@ -330,12 +359,20 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
if len(extraIPs) > 0 { if len(extraIPs) > 0 {
// This instance has more than one of at least one type of IP address (public, private, // This instance has more than one of at least one type of IP address (public, private,
// IPv4, IPv6, etc. We provide those extra IPs found here just like we do for instance // IPv4, etc. We provide those extra IPs found here just like we do for instance
// tags, we surround a separated list with the tagSeparator config. // tags, we surround a separated list with the tagSeparator config.
ips := d.tagSeparator + strings.Join(extraIPs, d.tagSeparator) + d.tagSeparator ips := d.tagSeparator + strings.Join(extraIPs, d.tagSeparator) + d.tagSeparator
labels[linodeLabelExtraIPs] = model.LabelValue(ips) labels[linodeLabelExtraIPs] = model.LabelValue(ips)
} }
if len(ipv6Ranges) > 0 {
// This instance has one or more IPv6 ranges routed to it; we provide those
// ranges here just like we do for instance tags, surrounding the separated
// list with the tagSeparator config.
ips := d.tagSeparator + strings.Join(ipv6Ranges, d.tagSeparator) + d.tagSeparator
labels[linodeLabelIPv6Ranges] = model.LabelValue(ips)
}
tg.Targets = append(tg.Targets, labels) tg.Targets = append(tg.Targets, labels)
} }
return []*targetgroup.Group{tg}, nil return []*targetgroup.Group{tg}, nil
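
Editor's note: worked through with the default tag_separator of ",", a single routed range yields a label value wrapped in separators, matching the ",2600:3c03:e000:123::/64," expectations in the test below. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	tagSeparator := ","
	ipv6Ranges := []string{"2600:3c03:e000:123::/64"}
	ips := tagSeparator + strings.Join(ipv6Ranges, tagSeparator) + tagSeparator
	fmt.Println(ips) // ",2600:3c03:e000:123::/64,"
}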


@ -28,159 +28,236 @@ import (
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
) )
type LinodeSDTestSuite struct {
Mock *SDMock
}
func (s *LinodeSDTestSuite) TearDownSuite() {
s.Mock.ShutdownServer()
}
func (s *LinodeSDTestSuite) SetupTest(t *testing.T) {
s.Mock = NewSDMock(t)
s.Mock.Setup()
s.Mock.HandleLinodeInstancesList()
s.Mock.HandleLinodeNeworkingIPs()
s.Mock.HandleLinodeAccountEvents()
}
func TestLinodeSDRefresh(t *testing.T) { func TestLinodeSDRefresh(t *testing.T) {
sdmock := &LinodeSDTestSuite{} sdmock := NewSDMock(t)
sdmock.SetupTest(t) sdmock.Setup()
t.Cleanup(sdmock.TearDownSuite)
cfg := DefaultSDConfig tests := map[string]struct {
cfg.HTTPClientConfig.Authorization = &config.Authorization{ region string
Credentials: tokenID, targetCount int
Type: "Bearer", want []model.LabelSet
}{
"no_region": {region: "", targetCount: 4, want: []model.LabelSet{
{
"__address__": model.LabelValue("45.33.82.151:80"),
"__meta_linode_instance_id": model.LabelValue("26838044"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"),
"__meta_linode_image": model.LabelValue("linode/arch"),
"__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"),
"__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("us-east"),
"__meta_linode_type": model.LabelValue("g6-standard-2"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
"__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"),
"__meta_linode_specs_vcpus": model.LabelValue("2"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"),
"__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"),
},
{
"__address__": model.LabelValue("139.162.196.43:80"),
"__meta_linode_instance_id": model.LabelValue("26848419"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-2"),
"__meta_linode_image": model.LabelValue("linode/debian10"),
"__meta_linode_private_ipv4": model.LabelValue(""),
"__meta_linode_public_ipv4": model.LabelValue("139.162.196.43"),
"__meta_linode_public_ipv6": model.LabelValue("2a01:7e00::f03c:92ff:fe1a:9976"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li1359-43.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("eu-west"),
"__meta_linode_type": model.LabelValue("g6-standard-2"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
"__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"),
"__meta_linode_specs_vcpus": model.LabelValue("2"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"),
},
{
"__address__": model.LabelValue("192.53.120.25:80"),
"__meta_linode_instance_id": model.LabelValue("26837938"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"),
"__meta_linode_image": model.LabelValue("linode/ubuntu20.04"),
"__meta_linode_private_ipv4": model.LabelValue(""),
"__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("ca-central"),
"__meta_linode_type": model.LabelValue("g6-standard-1"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"),
"__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"),
"__meta_linode_specs_vcpus": model.LabelValue("1"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"),
"__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c04:e001:456::/64,"),
},
{
"__address__": model.LabelValue("66.228.47.103:80"),
"__meta_linode_instance_id": model.LabelValue("26837992"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"),
"__meta_linode_image": model.LabelValue("linode/ubuntu20.04"),
"__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"),
"__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("us-east"),
"__meta_linode_type": model.LabelValue("g6-nanode-1"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"),
"__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"),
"__meta_linode_specs_vcpus": model.LabelValue("1"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"),
"__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"),
"__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c03:e000:123::/64,"),
},
}},
"us-east": {region: "us-east", targetCount: 2, want: []model.LabelSet{
{
"__address__": model.LabelValue("45.33.82.151:80"),
"__meta_linode_instance_id": model.LabelValue("26838044"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"),
"__meta_linode_image": model.LabelValue("linode/arch"),
"__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"),
"__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("us-east"),
"__meta_linode_type": model.LabelValue("g6-standard-2"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
"__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"),
"__meta_linode_specs_vcpus": model.LabelValue("2"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"),
"__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"),
},
{
"__address__": model.LabelValue("66.228.47.103:80"),
"__meta_linode_instance_id": model.LabelValue("26837992"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"),
"__meta_linode_image": model.LabelValue("linode/ubuntu20.04"),
"__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"),
"__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("us-east"),
"__meta_linode_type": model.LabelValue("g6-nanode-1"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"),
"__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"),
"__meta_linode_specs_vcpus": model.LabelValue("1"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"),
"__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"),
"__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c03:e000:123::/64,"),
},
}},
"us-central": {region: "ca-central", targetCount: 1, want: []model.LabelSet{
{
"__address__": model.LabelValue("192.53.120.25:80"),
"__meta_linode_instance_id": model.LabelValue("26837938"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"),
"__meta_linode_image": model.LabelValue("linode/ubuntu20.04"),
"__meta_linode_private_ipv4": model.LabelValue(""),
"__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("ca-central"),
"__meta_linode_type": model.LabelValue("g6-standard-1"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"),
"__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"),
"__meta_linode_specs_vcpus": model.LabelValue("1"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"),
"__meta_linode_ipv6_ranges": model.LabelValue(",2600:3c04:e001:456::/64,"),
},
}},
} }
reg := prometheus.NewRegistry() for _, tc := range tests {
refreshMetrics := discovery.NewRefreshMetrics(reg) cfg := DefaultSDConfig
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) if tc.region != "" {
require.NoError(t, metrics.Register()) cfg.Region = tc.region
defer metrics.Unregister() }
defer refreshMetrics.Unregister() cfg.HTTPClientConfig.Authorization = &config.Authorization{
Credentials: tokenID,
Type: "Bearer",
}
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) reg := prometheus.NewRegistry()
require.NoError(t, err) refreshMetrics := discovery.NewRefreshMetrics(reg)
endpoint, err := url.Parse(sdmock.Mock.Endpoint()) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, err) require.NoError(t, metrics.Register())
d.client.SetBaseURL(endpoint.String()) defer metrics.Unregister()
defer refreshMetrics.Unregister()
tgs, err := d.refresh(context.Background()) d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
endpoint, err := url.Parse(sdmock.Endpoint())
require.NoError(t, err)
d.client.SetBaseURL(endpoint.String())
require.Len(t, tgs, 1) tgs, err := d.refresh(context.Background())
require.NoError(t, err)
tg := tgs[0] require.Len(t, tgs, 1)
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 4)
for i, lbls := range []model.LabelSet{ tg := tgs[0]
{ require.NotNil(t, tg)
"__address__": model.LabelValue("45.33.82.151:80"), require.NotNil(t, tg.Targets)
"__meta_linode_instance_id": model.LabelValue("26838044"), require.Len(t, tg.Targets, tc.targetCount)
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-1"),
"__meta_linode_image": model.LabelValue("linode/arch"), for i, lbls := range tc.want {
"__meta_linode_private_ipv4": model.LabelValue("192.168.170.51"), t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
"__meta_linode_public_ipv4": model.LabelValue("45.33.82.151"), require.Equal(t, lbls, tg.Targets[i])
"__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:1382"), })
"__meta_linode_private_ipv4_rdns": model.LabelValue(""), }
"__meta_linode_public_ipv4_rdns": model.LabelValue("li1028-151.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("us-east"),
"__meta_linode_type": model.LabelValue("g6-standard-2"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
"__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"),
"__meta_linode_specs_vcpus": model.LabelValue("2"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"),
"__meta_linode_extra_ips": model.LabelValue(",96.126.108.16,192.168.201.25,"),
},
{
"__address__": model.LabelValue("139.162.196.43:80"),
"__meta_linode_instance_id": model.LabelValue("26848419"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-2"),
"__meta_linode_image": model.LabelValue("linode/debian10"),
"__meta_linode_private_ipv4": model.LabelValue(""),
"__meta_linode_public_ipv4": model.LabelValue("139.162.196.43"),
"__meta_linode_public_ipv6": model.LabelValue("2a01:7e00::f03c:92ff:fe1a:9976"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li1359-43.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("eu-west"),
"__meta_linode_type": model.LabelValue("g6-standard-2"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("85899345920"),
"__meta_linode_specs_memory_bytes": model.LabelValue("4294967296"),
"__meta_linode_specs_vcpus": model.LabelValue("2"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("4194304000"),
},
{
"__address__": model.LabelValue("192.53.120.25:80"),
"__meta_linode_instance_id": model.LabelValue("26837938"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-3"),
"__meta_linode_image": model.LabelValue("linode/ubuntu20.04"),
"__meta_linode_private_ipv4": model.LabelValue(""),
"__meta_linode_public_ipv4": model.LabelValue("192.53.120.25"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c04::f03c:92ff:fe1a:fb68"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li2216-25.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("ca-central"),
"__meta_linode_type": model.LabelValue("g6-standard-1"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("53687091200"),
"__meta_linode_specs_memory_bytes": model.LabelValue("2147483648"),
"__meta_linode_specs_vcpus": model.LabelValue("1"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("2097152000"),
},
{
"__address__": model.LabelValue("66.228.47.103:80"),
"__meta_linode_instance_id": model.LabelValue("26837992"),
"__meta_linode_instance_label": model.LabelValue("prometheus-linode-sd-exporter-4"),
"__meta_linode_image": model.LabelValue("linode/ubuntu20.04"),
"__meta_linode_private_ipv4": model.LabelValue("192.168.148.94"),
"__meta_linode_public_ipv4": model.LabelValue("66.228.47.103"),
"__meta_linode_public_ipv6": model.LabelValue("2600:3c03::f03c:92ff:fe1a:fb4c"),
"__meta_linode_private_ipv4_rdns": model.LabelValue(""),
"__meta_linode_public_ipv4_rdns": model.LabelValue("li328-103.members.linode.com"),
"__meta_linode_public_ipv6_rdns": model.LabelValue(""),
"__meta_linode_region": model.LabelValue("us-east"),
"__meta_linode_type": model.LabelValue("g6-nanode-1"),
"__meta_linode_status": model.LabelValue("running"),
"__meta_linode_tags": model.LabelValue(",monitoring,"),
"__meta_linode_group": model.LabelValue(""),
"__meta_linode_gpus": model.LabelValue("0"),
"__meta_linode_hypervisor": model.LabelValue("kvm"),
"__meta_linode_backups": model.LabelValue("disabled"),
"__meta_linode_specs_disk_bytes": model.LabelValue("26843545600"),
"__meta_linode_specs_memory_bytes": model.LabelValue("1073741824"),
"__meta_linode_specs_vcpus": model.LabelValue("1"),
"__meta_linode_specs_transfer_bytes": model.LabelValue("1048576000"),
"__meta_linode_extra_ips": model.LabelValue(",172.104.18.104,"),
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])
})
} }
} }
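
Editor's note: the rewrite above folds the old single-scenario suite into a map-keyed table covering no filter, us-east, and ca-central. A stripped-down sketch of the pattern it follows (assertions elided, names illustrative):

package linode

import "testing"

func TestRefreshByRegion(t *testing.T) {
	tests := map[string]struct {
		region      string
		targetCount int
	}{
		"no_region":  {region: "", targetCount: 4},
		"us-east":    {region: "us-east", targetCount: 2},
		"us-central": {region: "ca-central", targetCount: 1},
	}
	for name, tc := range tests {
		t.Run(name, func(t *testing.T) {
			// Build a config with tc.region, run refresh, and assert
			// len(tg.Targets) == tc.targetCount, as the real test does.
			_ = tc
		})
	}
}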


@ -14,12 +14,17 @@
package linode package linode
import ( import (
"encoding/json"
"fmt" "fmt"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"os"
"path/filepath"
"testing" "testing"
) )
const tokenID = "7b2c56dd51edd90952c1b94c472b94b176f20c5c777e376849edd8ad1c6c03bb"
// SDMock is the interface for the Linode mock. // SDMock is the interface for the Linode mock.
type SDMock struct { type SDMock struct {
t *testing.T t *testing.T
@ -43,412 +48,34 @@ func (m *SDMock) Endpoint() string {
func (m *SDMock) Setup() { func (m *SDMock) Setup() {
m.Mux = http.NewServeMux() m.Mux = http.NewServeMux()
m.Server = httptest.NewServer(m.Mux) m.Server = httptest.NewServer(m.Mux)
m.t.Cleanup(m.Server.Close)
m.SetupHandlers()
} }
// ShutdownServer creates the mock server. // SetupHandlers for endpoints of interest.
func (m *SDMock) ShutdownServer() { func (m *SDMock) SetupHandlers() {
m.Server.Close() for _, handler := range []string{"/v4/account/events", "/v4/linode/instances", "/v4/networking/ips", "/v4/networking/ipv6/ranges"} {
} m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) {
const tokenID = "7b2c56dd51edd90952c1b94c472b94b176f20c5c777e376849edd8ad1c6c03bb" w.WriteHeader(http.StatusUnauthorized)
return
// HandleLinodeInstancesList mocks linode instances list. }
func (m *SDMock) HandleLinodeInstancesList() { xFilter := struct {
m.Mux.HandleFunc("/v4/linode/instances", func(w http.ResponseWriter, r *http.Request) { Region string `json:"region"`
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) { }{}
w.WriteHeader(http.StatusUnauthorized) json.Unmarshal([]byte(r.Header.Get("X-Filter")), &xFilter)
return
} directory := "testdata/no_region_filter"
if xFilter.Region != "" { // Validate region filter matches test criteria.
w.Header().Set("content-type", "application/json; charset=utf-8") directory = "testdata/" + xFilter.Region
w.WriteHeader(http.StatusOK) }
if response, err := os.ReadFile(filepath.Join(directory, r.URL.Path+".json")); err == nil {
fmt.Fprint(w, ` w.Header().Add("content-type", "application/json; charset=utf-8")
{ w.WriteHeader(http.StatusOK)
"data": [ w.Write(response)
{ return
"id": 26838044, }
"label": "prometheus-linode-sd-exporter-1", w.WriteHeader(http.StatusInternalServerError)
"group": "", })
"status": "running", }
"created": "2021-05-12T04:23:44",
"updated": "2021-05-12T04:23:44",
"type": "g6-standard-2",
"ipv4": [
"45.33.82.151",
"96.126.108.16",
"192.168.170.51",
"192.168.201.25"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128",
"image": "linode/arch",
"region": "us-east",
"specs": {
"disk": 81920,
"memory": 4096,
"vcpus": 2,
"gpus": 0,
"transfer": 4000
},
"alerts": {
"cpu": 180,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26848419,
"label": "prometheus-linode-sd-exporter-2",
"group": "",
"status": "running",
"created": "2021-05-12T12:41:49",
"updated": "2021-05-12T12:41:49",
"type": "g6-standard-2",
"ipv4": [
"139.162.196.43"
],
"ipv6": "2a01:7e00::f03c:92ff:fe1a:9976/128",
"image": "linode/debian10",
"region": "eu-west",
"specs": {
"disk": 81920,
"memory": 4096,
"vcpus": 2,
"gpus": 0,
"transfer": 4000
},
"alerts": {
"cpu": 180,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26837938,
"label": "prometheus-linode-sd-exporter-3",
"group": "",
"status": "running",
"created": "2021-05-12T04:20:11",
"updated": "2021-05-12T04:20:11",
"type": "g6-standard-1",
"ipv4": [
"192.53.120.25"
],
"ipv6": "2600:3c04::f03c:92ff:fe1a:fb68/128",
"image": "linode/ubuntu20.04",
"region": "ca-central",
"specs": {
"disk": 51200,
"memory": 2048,
"vcpus": 1,
"gpus": 0,
"transfer": 2000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26837992,
"label": "prometheus-linode-sd-exporter-4",
"group": "",
"status": "running",
"created": "2021-05-12T04:22:06",
"updated": "2021-05-12T04:22:06",
"type": "g6-nanode-1",
"ipv4": [
"66.228.47.103",
"172.104.18.104",
"192.168.148.94"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128",
"image": "linode/ubuntu20.04",
"region": "us-east",
"specs": {
"disk": 25600,
"memory": 1024,
"vcpus": 1,
"gpus": 0,
"transfer": 1000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
}
],
"page": 1,
"pages": 1,
"results": 4
}`,
)
})
}
// HandleLinodeNeworkingIPs mocks linode networking ips endpoint.
func (m *SDMock) HandleLinodeNeworkingIPs() {
m.Mux.HandleFunc("/v4/networking/ips", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) {
w.WriteHeader(http.StatusUnauthorized)
return
}
w.Header().Set("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `
{
"page": 1,
"pages": 1,
"results": 13,
"data": [
{
"address": "192.53.120.25",
"gateway": "192.53.120.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li2216-25.members.linode.com",
"linode_id": 26837938,
"region": "ca-central"
},
{
"address": "66.228.47.103",
"gateway": "66.228.47.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li328-103.members.linode.com",
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "172.104.18.104",
"gateway": "172.104.18.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1832-104.members.linode.com",
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "192.168.148.94",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "192.168.170.51",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "96.126.108.16",
"gateway": "96.126.108.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li365-16.members.linode.com",
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "45.33.82.151",
"gateway": "45.33.82.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1028-151.members.linode.com",
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "192.168.201.25",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "139.162.196.43",
"gateway": "139.162.196.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1359-43.members.linode.com",
"linode_id": 26848419,
"region": "eu-west"
},
{
"address": "2600:3c04::f03c:92ff:fe1a:fb68",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26837938,
"region": "ca-central",
"public": true
},
{
"address": "2600:3c03::f03c:92ff:fe1a:fb4c",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26837992,
"region": "us-east",
"public": true
},
{
"address": "2600:3c03::f03c:92ff:fe1a:1382",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26838044,
"region": "us-east",
"public": true
},
{
"address": "2a01:7e00::f03c:92ff:fe1a:9976",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26848419,
"region": "eu-west",
"public": true
}
]
}`,
)
})
}
// HandleLinodeAccountEvents mocks linode the account/events endpoint.
func (m *SDMock) HandleLinodeAccountEvents() {
m.Mux.HandleFunc("/v4/account/events", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) {
w.WriteHeader(http.StatusUnauthorized)
return
}
if r.Header.Get("X-Filter") == "" {
// This should never happen; if the client sends an events request without
// a filter, cause it to fail. The error below is not a real response from
// the API, but should aid in debugging failed tests.
w.WriteHeader(http.StatusBadRequest)
fmt.Fprint(w, `
{
"errors": [
{
"reason": "Request missing expected X-Filter headers"
}
]
}`,
)
return
}
w.Header().Set("content-type", "application/json; charset=utf-8")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, `
{
"data": [],
"results": 0,
"pages": 1,
"page": 1
}`,
)
})
} }
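
Editor's note: since the side-by-side rendering interleaves the deleted inline fixtures with the new handler above, here is the new flow in one piece — a reconstruction of SetupHandlers from the right-hand column, assuming the encoding/json, os, and path/filepath imports the earlier hunk adds:

func (m *SDMock) SetupHandlers() {
	for _, handler := range []string{"/v4/account/events", "/v4/linode/instances", "/v4/networking/ips", "/v4/networking/ipv6/ranges"} {
		m.Mux.HandleFunc(handler, func(w http.ResponseWriter, r *http.Request) {
			if r.Header.Get("Authorization") != fmt.Sprintf("Bearer %s", tokenID) {
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
			// The optional region constraint arrives as a JSON X-Filter header.
			xFilter := struct {
				Region string `json:"region"`
			}{}
			json.Unmarshal([]byte(r.Header.Get("X-Filter")), &xFilter)

			// Serve the fixture directory matching the requested region.
			directory := "testdata/no_region_filter"
			if xFilter.Region != "" {
				directory = "testdata/" + xFilter.Region
			}
			if response, err := os.ReadFile(filepath.Join(directory, r.URL.Path+".json")); err == nil {
				w.Header().Add("content-type", "application/json; charset=utf-8")
				w.WriteHeader(http.StatusOK)
				w.Write(response)
				return
			}
			w.WriteHeader(http.StatusInternalServerError)
		})
	}
}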


@ -0,0 +1,6 @@
{
"data": [],
"results": 0,
"pages": 1,
"page": 1
}


@ -0,0 +1,49 @@
{
"data": [
{
"id": 26837938,
"label": "prometheus-linode-sd-exporter-3",
"group": "",
"status": "running",
"created": "2021-05-12T04:20:11",
"updated": "2021-05-12T04:20:11",
"type": "g6-standard-1",
"ipv4": [
"192.53.120.25"
],
"ipv6": "2600:3c04::f03c:92ff:fe1a:fb68/128",
"image": "linode/ubuntu20.04",
"region": "ca-central",
"specs": {
"disk": 51200,
"memory": 2048,
"vcpus": 1,
"gpus": 0,
"transfer": 2000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
}
],
"page": 1,
"pages": 1,
"results": 1
}


@ -0,0 +1,29 @@
{
"page": 1,
"pages": 1,
"results": 2,
"data": [
{
"address": "192.53.120.25",
"gateway": "192.53.120.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li2216-25.members.linode.com",
"linode_id": 26837938,
"region": "ca-central"
},
{
"address": "2600:3c04::f03c:92ff:fe1a:fb68",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26837938,
"region": "ca-central",
"public": true
}
]
}


@ -0,0 +1,13 @@
{
"data": [
{
"range": "2600:3c04:e001:456::",
"prefix": 64,
"region": "ca-central",
"route_target": "2600:3c04::f03c:92ff:fe1a:fb68"
}
],
"page": 1,
"pages": 1,
"results": 1
}


@ -0,0 +1,6 @@
{
"data": [],
"results": 0,
"pages": 1,
"page": 1
}


@ -0,0 +1,180 @@
{
"data": [
{
"id": 26838044,
"label": "prometheus-linode-sd-exporter-1",
"group": "",
"status": "running",
"created": "2021-05-12T04:23:44",
"updated": "2021-05-12T04:23:44",
"type": "g6-standard-2",
"ipv4": [
"45.33.82.151",
"96.126.108.16",
"192.168.170.51",
"192.168.201.25"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128",
"image": "linode/arch",
"region": "us-east",
"specs": {
"disk": 81920,
"memory": 4096,
"vcpus": 2,
"gpus": 0,
"transfer": 4000
},
"alerts": {
"cpu": 180,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26848419,
"label": "prometheus-linode-sd-exporter-2",
"group": "",
"status": "running",
"created": "2021-05-12T12:41:49",
"updated": "2021-05-12T12:41:49",
"type": "g6-standard-2",
"ipv4": [
"139.162.196.43"
],
"ipv6": "2a01:7e00::f03c:92ff:fe1a:9976/128",
"image": "linode/debian10",
"region": "eu-west",
"specs": {
"disk": 81920,
"memory": 4096,
"vcpus": 2,
"gpus": 0,
"transfer": 4000
},
"alerts": {
"cpu": 180,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26837938,
"label": "prometheus-linode-sd-exporter-3",
"group": "",
"status": "running",
"created": "2021-05-12T04:20:11",
"updated": "2021-05-12T04:20:11",
"type": "g6-standard-1",
"ipv4": [
"192.53.120.25"
],
"ipv6": "2600:3c04::f03c:92ff:fe1a:fb68/128",
"image": "linode/ubuntu20.04",
"region": "ca-central",
"specs": {
"disk": 51200,
"memory": 2048,
"vcpus": 1,
"gpus": 0,
"transfer": 2000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26837992,
"label": "prometheus-linode-sd-exporter-4",
"group": "",
"status": "running",
"created": "2021-05-12T04:22:06",
"updated": "2021-05-12T04:22:06",
"type": "g6-nanode-1",
"ipv4": [
"66.228.47.103",
"172.104.18.104",
"192.168.148.94"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128",
"image": "linode/ubuntu20.04",
"region": "us-east",
"specs": {
"disk": 25600,
"memory": 1024,
"vcpus": 1,
"gpus": 0,
"transfer": 1000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
}
],
"page": 1,
"pages": 1,
"results": 4
}


@ -0,0 +1,150 @@
{
"page": 1,
"pages": 1,
"results": 13,
"data": [
{
"address": "192.53.120.25",
"gateway": "192.53.120.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li2216-25.members.linode.com",
"linode_id": 26837938,
"region": "ca-central"
},
{
"address": "66.228.47.103",
"gateway": "66.228.47.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li328-103.members.linode.com",
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "172.104.18.104",
"gateway": "172.104.18.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1832-104.members.linode.com",
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "192.168.148.94",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "192.168.170.51",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "96.126.108.16",
"gateway": "96.126.108.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li365-16.members.linode.com",
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "45.33.82.151",
"gateway": "45.33.82.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1028-151.members.linode.com",
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "192.168.201.25",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "139.162.196.43",
"gateway": "139.162.196.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1359-43.members.linode.com",
"linode_id": 26848419,
"region": "eu-west"
},
{
"address": "2600:3c04::f03c:92ff:fe1a:fb68",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26837938,
"region": "ca-central",
"public": true
},
{
"address": "2600:3c03::f03c:92ff:fe1a:fb4c",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26837992,
"region": "us-east",
"public": true
},
{
"address": "2600:3c03::f03c:92ff:fe1a:1382",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26838044,
"region": "us-east",
"public": true
},
{
"address": "2a01:7e00::f03c:92ff:fe1a:9976",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26848419,
"region": "eu-west",
"public": true
}
]
}


@ -0,0 +1,19 @@
{
"data": [
{
"range": "2600:3c03:e000:123::",
"prefix": 64,
"region": "us-east",
"route_target": "2600:3c03::f03c:92ff:fe1a:fb4c"
},
{
"range": "2600:3c04:e001:456::",
"prefix": 64,
"region": "ca-central",
"route_target": "2600:3c04::f03c:92ff:fe1a:fb68"
}
],
"page": 1,
"pages": 1,
"results": 2
}


@ -0,0 +1,6 @@
{
"data": [],
"results": 0,
"pages": 1,
"page": 1
}


@ -0,0 +1,97 @@
{
"data": [
{
"id": 26838044,
"label": "prometheus-linode-sd-exporter-1",
"group": "",
"status": "running",
"created": "2021-05-12T04:23:44",
"updated": "2021-05-12T04:23:44",
"type": "g6-standard-2",
"ipv4": [
"45.33.82.151",
"96.126.108.16",
"192.168.170.51",
"192.168.201.25"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:1382/128",
"image": "linode/arch",
"region": "us-east",
"specs": {
"disk": 81920,
"memory": 4096,
"vcpus": 2,
"gpus": 0,
"transfer": 4000
},
"alerts": {
"cpu": 180,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
},
{
"id": 26837992,
"label": "prometheus-linode-sd-exporter-4",
"group": "",
"status": "running",
"created": "2021-05-12T04:22:06",
"updated": "2021-05-12T04:22:06",
"type": "g6-nanode-1",
"ipv4": [
"66.228.47.103",
"172.104.18.104",
"192.168.148.94"
],
"ipv6": "2600:3c03::f03c:92ff:fe1a:fb4c/128",
"image": "linode/ubuntu20.04",
"region": "us-east",
"specs": {
"disk": 25600,
"memory": 1024,
"vcpus": 1,
"gpus": 0,
"transfer": 1000
},
"alerts": {
"cpu": 90,
"network_in": 10,
"network_out": 10,
"transfer_quota": 80,
"io": 10000
},
"backups": {
"enabled": false,
"schedule": {
"day": null,
"window": null
},
"last_successful": null
},
"hypervisor": "kvm",
"watchdog_enabled": true,
"tags": [
"monitoring"
]
}
],
"page": 1,
"pages": 1,
"results": 2
}


@ -0,0 +1,106 @@
{
"page": 1,
"pages": 1,
"results": 9,
"data": [
{
"address": "66.228.47.103",
"gateway": "66.228.47.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li328-103.members.linode.com",
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "172.104.18.104",
"gateway": "172.104.18.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1832-104.members.linode.com",
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "192.168.148.94",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26837992,
"region": "us-east"
},
{
"address": "192.168.170.51",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "96.126.108.16",
"gateway": "96.126.108.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li365-16.members.linode.com",
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "45.33.82.151",
"gateway": "45.33.82.1",
"subnet_mask": "255.255.255.0",
"prefix": 24,
"type": "ipv4",
"public": true,
"rdns": "li1028-151.members.linode.com",
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "192.168.201.25",
"gateway": null,
"subnet_mask": "255.255.128.0",
"prefix": 17,
"type": "ipv4",
"public": false,
"rdns": null,
"linode_id": 26838044,
"region": "us-east"
},
{
"address": "2600:3c03::f03c:92ff:fe1a:fb4c",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26837992,
"region": "us-east",
"public": true
},
{
"address": "2600:3c03::f03c:92ff:fe1a:1382",
"gateway": "fe80::1",
"subnet_mask": "ffff:ffff:ffff:ffff::",
"prefix": 64,
"type": "ipv6",
"rdns": null,
"linode_id": 26838044,
"region": "us-east",
"public": true
}
]
}


@ -0,0 +1,13 @@
{
"data": [
{
"range": "2600:3c03:e000:123::",
"prefix": 64,
"region": "us-east",
"route_target": "2600:3c03::f03c:92ff:fe1a:fb4c"
}
],
"page": 1,
"pages": 1,
"results": 1
}


@ -169,6 +169,13 @@ func (m *Manager) Providers() []*Provider {
return m.providers return m.providers
} }
// UnregisterMetrics unregisters manager metrics. It does not unregister
// service discovery or refresh metrics, whose lifecycle is managed independently
// of the discovery Manager.
func (m *Manager) UnregisterMetrics() {
m.metrics.Unregister(m.registerer)
}
// Run starts the background processing. // Run starts the background processing.
func (m *Manager) Run() error { func (m *Manager) Run() error {
go m.sender() go m.sender()
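
Editor's note: for callers that tear down one discovery stack and build another against the same registry — the scenario TestUnregisterMetrics exercises below — a hedged teardown sketch (variable names illustrative):

// Manager metrics only; discoverer and refresh metrics have their own lifecycle.
discoveryManager.UnregisterMetrics()
for _, sdMetric := range sdMetrics {
	sdMetric.Unregister()
}
refreshMetrics.Unregister()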


@ -36,11 +36,11 @@ func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m) testutil.TolerantVerifyLeak(m)
} }
func NewTestMetrics(t *testing.T, reg prometheus.Registerer) (*RefreshMetricsManager, map[string]DiscovererMetrics) { func NewTestMetrics(t *testing.T, reg prometheus.Registerer) (RefreshMetricsManager, map[string]DiscovererMetrics) {
refreshMetrics := NewRefreshMetrics(reg) refreshMetrics := NewRefreshMetrics(reg)
sdMetrics, err := RegisterSDMetrics(reg, refreshMetrics) sdMetrics, err := RegisterSDMetrics(reg, refreshMetrics)
require.NoError(t, err) require.NoError(t, err)
return &refreshMetrics, sdMetrics return refreshMetrics, sdMetrics
} }
// TestTargetUpdatesOrder checks that the target updates are received in the expected order. // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
@ -733,7 +733,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group,
t.Helper() t.Helper()
if _, ok := tGroups[key]; !ok { if _, ok := tGroups[key]; !ok {
t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups) t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
return
} }
match := false match := false
var mergedTargets string var mergedTargets string
@ -1542,3 +1541,24 @@ func (t *testDiscoverer) update(tgs []*targetgroup.Group) {
<-t.ready <-t.ready
t.up <- tgs t.up <- tgs
} }
func TestUnregisterMetrics(t *testing.T) {
reg := prometheus.NewRegistry()
// Check that all metrics can be unregistered, allowing a second manager to be created.
for i := 0; i < 2; i++ {
ctx, cancel := context.WithCancel(context.Background())
refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
// discoveryManager will be nil if there was an error configuring metrics.
require.NotNil(t, discoveryManager)
// Unregister all metrics.
discoveryManager.UnregisterMetrics()
for _, sdMetric := range sdMetrics {
sdMetric.Unregister()
}
refreshMetrics.Unregister()
cancel()
}
}


@ -339,7 +339,7 @@ type appListClient func(ctx context.Context, client *http.Client, url string) (*
// fetchApps requests a list of applications from a marathon server. // fetchApps requests a list of applications from a marathon server.
func fetchApps(ctx context.Context, client *http.Client, url string) (*appList, error) { func fetchApps(ctx context.Context, client *http.Client, url string) (*appList, error) {
request, err := http.NewRequest("GET", url, nil) request, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -99,3 +99,12 @@ func NewManagerMetrics(registerer prometheus.Registerer, sdManagerName string) (
return m, nil return m, nil
} }
// Unregister unregisters all metrics.
func (m *Metrics) Unregister(registerer prometheus.Registerer) {
registerer.Unregister(m.FailedConfigs)
registerer.Unregister(m.DiscoveredTargets)
registerer.Unregister(m.ReceivedUpdates)
registerer.Unregister(m.DelayedUpdates)
registerer.Unregister(m.SentUpdates)
}


@ -239,7 +239,7 @@ const hypervisorListBody = `
// HandleHypervisorListSuccessfully mocks os-hypervisors detail call. // HandleHypervisorListSuccessfully mocks os-hypervisors detail call.
func (m *SDMock) HandleHypervisorListSuccessfully() { func (m *SDMock) HandleHypervisorListSuccessfully() {
m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) {
testMethod(m.t, r, "GET") testMethod(m.t, r, http.MethodGet)
testHeader(m.t, r, "X-Auth-Token", tokenID) testHeader(m.t, r, "X-Auth-Token", tokenID)
w.Header().Add("Content-Type", "application/json") w.Header().Add("Content-Type", "application/json")
@ -536,7 +536,7 @@ const serverListBody = `
// HandleServerListSuccessfully mocks server detail call. // HandleServerListSuccessfully mocks server detail call.
func (m *SDMock) HandleServerListSuccessfully() { func (m *SDMock) HandleServerListSuccessfully() {
m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) {
testMethod(m.t, r, "GET") testMethod(m.t, r, http.MethodGet)
testHeader(m.t, r, "X-Auth-Token", tokenID) testHeader(m.t, r, "X-Auth-Token", tokenID)
w.Header().Add("Content-Type", "application/json") w.Header().Add("Content-Type", "application/json")
@ -575,7 +575,7 @@ const listOutput = `
// HandleFloatingIPListSuccessfully mocks floating ips call. // HandleFloatingIPListSuccessfully mocks floating ips call.
func (m *SDMock) HandleFloatingIPListSuccessfully() { func (m *SDMock) HandleFloatingIPListSuccessfully() {
m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { m.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) {
testMethod(m.t, r, "GET") testMethod(m.t, r, http.MethodGet)
testHeader(m.t, r, "X-Auth-Token", tokenID) testHeader(m.t, r, "X-Auth-Token", tokenID)
w.Header().Add("Content-Type", "application/json") w.Header().Add("Content-Type", "application/json")


@ -189,7 +189,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
return nil, err return nil, err
} }
req, err := http.NewRequest("POST", d.url, bytes.NewBuffer(bodyBytes)) req, err := http.NewRequest(http.MethodPost, d.url, bytes.NewBuffer(bodyBytes))
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -211,7 +211,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups) endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups)
} }
req, err := http.NewRequest("GET", endpoint, nil) req, err := http.NewRequest(http.MethodGet, endpoint, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -179,7 +179,7 @@ func (rc *HTTPResourceClient) Fetch(ctx context.Context) (*v3.DiscoveryResponse,
return nil, err return nil, err
} }
request, err := http.NewRequest("POST", rc.endpoint, bytes.NewBuffer(reqBody)) request, err := http.NewRequest(http.MethodPost, rc.endpoint, bytes.NewBuffer(reqBody))
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -54,7 +54,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` | | <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` | | <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |
| <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` | | <code class="text-nowrap">--query.max-samples</code> | Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return. Use with server mode only. | `50000000` |
| <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | | | <code class="text-nowrap">--enable-feature</code> | Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details. | |
| <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` | | <code class="text-nowrap">--log.level</code> | Only log messages with the given severity or above. One of: [debug, info, warn, error] | `info` |
| <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` | | <code class="text-nowrap">--log.format</code> | Output format of log messages. One of: [logfmt, json] | `logfmt` |


@ -600,8 +600,10 @@ See below for the configuration options for Azure discovery:
# The Azure environment. # The Azure environment.
[ environment: <string> | default = AzurePublicCloud ] [ environment: <string> | default = AzurePublicCloud ]
# The authentication method, either OAuth or ManagedIdentity. # The authentication method, either OAuth, ManagedIdentity or SDK.
# See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview # See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
# The SDK authentication method uses environment variables by default.
# See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication
[ authentication_method: <string> | default = OAuth] [ authentication_method: <string> | default = OAuth]
# The subscription ID. Always required. # The subscription ID. Always required.
subscription_id: <string> subscription_id: <string>
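
Editor's note: a hedged example of a scrape config opting into the new SDK method — the job name and subscription ID are placeholders, and credentials are resolved from the environment as the linked Azure SDK doc describes:

scrape_configs:
  - job_name: azure-sdk
    azure_sd_configs:
      - authentication_method: SDK
        subscription_id: 00000000-0000-0000-0000-000000000000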
@ -2447,11 +2449,15 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_linode_private_ipv4`: the private IPv4 of the linode instance * `__meta_linode_private_ipv4`: the private IPv4 of the linode instance
* `__meta_linode_public_ipv4`: the public IPv4 of the linode instance * `__meta_linode_public_ipv4`: the public IPv4 of the linode instance
* `__meta_linode_public_ipv6`: the public IPv6 of the linode instance * `__meta_linode_public_ipv6`: the public IPv6 of the linode instance
* `__meta_linode_private_ipv4_rdns`: the reverse DNS for the first private IPv4 of the linode instance
* `__meta_linode_public_ipv4_rdns`: the reverse DNS for the first public IPv4 of the linode instance
* `__meta_linode_public_ipv6_rdns`: the reverse DNS for the first public IPv6 of the linode instance
* `__meta_linode_region`: the region of the linode instance * `__meta_linode_region`: the region of the linode instance
* `__meta_linode_type`: the type of the linode instance * `__meta_linode_type`: the type of the linode instance
* `__meta_linode_status`: the status of the linode instance * `__meta_linode_status`: the status of the linode instance
* `__meta_linode_tags`: a list of tags of the linode instance joined by the tag separator * `__meta_linode_tags`: a list of tags of the linode instance joined by the tag separator
* `__meta_linode_group`: the display group a linode instance is a member of * `__meta_linode_group`: the display group a linode instance is a member of
* `__meta_linode_gpus`: the number of GPUs of the linode instance
* `__meta_linode_hypervisor`: the virtualization software powering the linode instance * `__meta_linode_hypervisor`: the virtualization software powering the linode instance
* `__meta_linode_backups`: the backup service status of the linode instance * `__meta_linode_backups`: the backup service status of the linode instance
* `__meta_linode_specs_disk_bytes`: the amount of storage space the linode instance has access to * `__meta_linode_specs_disk_bytes`: the amount of storage space the linode instance has access to
@ -2459,6 +2465,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_linode_specs_vcpus`: the number of VCPUS this linode has access to * `__meta_linode_specs_vcpus`: the number of VCPUS this linode has access to
* `__meta_linode_specs_transfer_bytes`: the amount of network transfer the linode instance is allotted each month * `__meta_linode_specs_transfer_bytes`: the amount of network transfer the linode instance is allotted each month
* `__meta_linode_extra_ips`: a list of all extra IPv4 addresses assigned to the linode instance joined by the tag separator * `__meta_linode_extra_ips`: a list of all extra IPv4 addresses assigned to the linode instance joined by the tag separator
* `__meta_linode_ipv6_ranges`: a list of IPv6 ranges with mask assigned to the linode instance joined by the tag separator
```yaml ```yaml
# Authentication information used to authenticate to the API server. # Authentication information used to authenticate to the API server.
@ -2489,6 +2496,9 @@ authorization:
oauth2: oauth2:
[ <oauth2> ] [ <oauth2> ]
# Optional region to filter on.
[ region: <string> ]
# Optional proxy URL. # Optional proxy URL.
[ proxy_url: <string> ] [ proxy_url: <string> ]
# Comma-separated string that can contain IPs, CIDR notation, domain names # Comma-separated string that can contain IPs, CIDR notation, domain names
@ -3226,7 +3236,7 @@ are set to the scheme and metrics path of the target respectively. The `__param_
label is set to the value of the first passed URL parameter called `<name>`. label is set to the value of the first passed URL parameter called `<name>`.
The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's The `__scrape_interval__` and `__scrape_timeout__` labels are set to the target's
interval and timeout. This is **experimental** and could change in the future. interval and timeout.
Additional labels prefixed with `__meta_` may be available during the Additional labels prefixed with `__meta_` may be available during the
relabeling phase. They are set by the service discovery mechanism that provided relabeling phase. They are set by the service discovery mechanism that provided
@ -3619,6 +3629,11 @@ azuread:
[ client_secret: <string> ] [ client_secret: <string> ]
[ tenant_id: <string> ] ] [ tenant_id: <string> ] ]
# Azure SDK auth.
# See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication
[ sdk:
[ tenant_id: <string> ] ]
# Configures the remote write request's TLS settings. # Configures the remote write request's TLS settings.
tls_config: tls_config:
[ <tls_config> ] [ <tls_config> ]

View file

@ -12,6 +12,7 @@ scrape_configs:
linode_sd_configs: linode_sd_configs:
- authorization: - authorization:
credentials: "<replace with a Personal Access Token with linodes:read_only, ips:read_only, and events:read_only access>" credentials: "<replace with a Personal Access Token with linodes:read_only, ips:read_only, and events:read_only access>"
region: "us-east"
relabel_configs: relabel_configs:
# Only scrape targets that have a tag 'monitoring'. # Only scrape targets that have a tag 'monitoring'.
- source_labels: [__meta_linode_tags] - source_labels: [__meta_linode_tags]

View file

@ -9,7 +9,7 @@ require (
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.5 github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_golang v1.19.0
github.com/prometheus/common v0.49.0 github.com/prometheus/common v0.50.0
github.com/prometheus/prometheus v0.50.1 github.com/prometheus/prometheus v0.50.1
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
) )
@ -58,17 +58,17 @@ require (
go.opentelemetry.io/otel/trace v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect
go.uber.org/atomic v1.11.0 // indirect go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.19.0 // indirect golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/net v0.21.0 // indirect golang.org/x/net v0.22.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sys v0.17.0 // indirect golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect golang.org/x/time v0.5.0 // indirect
google.golang.org/appengine v1.6.8 // indirect google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect
google.golang.org/grpc v1.61.0 // indirect google.golang.org/grpc v1.61.0 // indirect
google.golang.org/protobuf v1.32.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.28.6 // indirect k8s.io/apimachinery v0.28.6 // indirect

View file

@ -269,8 +269,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.49.0 h1:ToNTdK4zSnPVJmh698mGFkDor9wBI/iGaJy5dbH1EgI= github.com/prometheus/common v0.50.0 h1:YSZE6aa9+luNa2da6/Tik0q0A5AbR+U003TItK57CPQ=
github.com/prometheus/common v0.49.0/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE= github.com/prometheus/common v0.50.0/go.mod h1:wHFBCEVWVmHMUpg7pYcOm2QUR/ocQdYSJVQJKnHc3xQ=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -332,8 +332,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@ -356,12 +356,12 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -389,12 +389,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -436,8 +436,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View file

@ -74,7 +74,7 @@ testmetric,test_label=test_label_value2 value=5.1234 123456789123
server := httptest.NewServer(http.HandlerFunc( server := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) { func(w http.ResponseWriter, r *http.Request) {
require.Equal(t, "POST", r.Method, "Unexpected method.") require.Equal(t, http.MethodPost, r.Method, "Unexpected method.")
require.Equal(t, "/write", r.URL.Path, "Unexpected path.") require.Equal(t, "/write", r.URL.Path, "Unexpected path.")
b, err := io.ReadAll(r.Body) b, err := io.ReadAll(r.Body)
require.NoError(t, err, "Error reading body.") require.NoError(t, err, "Error reading body.")

View file

@ -105,7 +105,7 @@ func (c *Client) Write(samples model.Samples) error {
ctx, cancel := context.WithTimeout(context.Background(), c.timeout) ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel() defer cancel()
req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer(buf)) req, err := http.NewRequest(http.MethodPost, u.String(), bytes.NewBuffer(buf))
if err != nil { if err != nil {
return err return err
} }

20
go.mod
View file

@ -41,7 +41,7 @@ require (
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.7 github.com/klauspost/compress v1.17.7
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.29.0 github.com/linode/linodego v1.30.0
github.com/miekg/dns v1.1.58 github.com/miekg/dns v1.1.58
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@ -60,9 +60,9 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2 github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/featuregate v1.3.0 go.opentelemetry.io/collector/featuregate v1.4.0
go.opentelemetry.io/collector/pdata v1.3.0 go.opentelemetry.io/collector/pdata v1.4.0
go.opentelemetry.io/collector/semconv v0.96.0 go.opentelemetry.io/collector/semconv v0.97.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
go.opentelemetry.io/otel v1.24.0 go.opentelemetry.io/otel v1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
@ -74,7 +74,6 @@ require (
go.uber.org/automaxprocs v1.5.3 go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0 go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0 go.uber.org/multierr v1.11.0
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/net v0.22.0 golang.org/x/net v0.22.0
golang.org/x/oauth2 v0.18.0 golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.6.0 golang.org/x/sync v0.6.0
@ -84,12 +83,12 @@ require (
google.golang.org/api v0.168.0 google.golang.org/api v0.168.0
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8
google.golang.org/grpc v1.62.1 google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.32.0 google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.2 k8s.io/api v0.29.3
k8s.io/apimachinery v0.29.2 k8s.io/apimachinery v0.29.3
k8s.io/client-go v0.29.2 k8s.io/client-go v0.29.3
k8s.io/klog v1.0.0 k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.120.1 k8s.io/klog/v2 v2.120.1
) )
@ -134,7 +133,7 @@ require (
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/golang/glog v1.2.0 // indirect github.com/golang/glog v1.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
@ -186,6 +185,7 @@ require (
go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect
golang.org/x/crypto v0.21.0 // indirect golang.org/x/crypto v0.21.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.16.0 // indirect golang.org/x/mod v0.16.0 // indirect
golang.org/x/term v0.18.0 // indirect golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect

40
go.sum
View file

@ -280,8 +280,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@ -424,8 +424,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@ -471,8 +471,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4= github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k=
github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk= github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@ -720,12 +720,12 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/featuregate v1.3.0 h1:nrFSx+zfjdisjE9oCx25Aep3nJ9RaUjeE1qFL6eovoU= go.opentelemetry.io/collector/featuregate v1.4.0 h1:RWE9M659C9iuUQc4GzBsndkGHG1jIzIY+nZJWvcKy1M=
go.opentelemetry.io/collector/featuregate v1.3.0/go.mod h1:mm8+xyQfgDmqhyegZRNIQmoKsNnDTwWKFLsdMoXAb7A= go.opentelemetry.io/collector/featuregate v1.4.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
go.opentelemetry.io/collector/pdata v1.3.0 h1:JRYN7tVHYFwmtQhIYbxWeiKSa2L1nCohyAs8sYqKFZo= go.opentelemetry.io/collector/pdata v1.4.0 h1:cA6Pr7Z2V7mE+i7FmYpavX7nefzd6H4CICgW0T9aJX0=
go.opentelemetry.io/collector/pdata v1.3.0/go.mod h1:t7W0Undtes53HODPdSujPLTnfSR5fzT+WpL+RTaaayo= go.opentelemetry.io/collector/pdata v1.4.0/go.mod h1:0Ttp4wQinhV5oJTd9MjyvUegmZBO9O0nrlh/+EDLw+Q=
go.opentelemetry.io/collector/semconv v0.96.0 h1:DrZy8BpzJDnN2zFxXRj6BhfGYxNlqpFHBqyuS9fVHRY= go.opentelemetry.io/collector/semconv v0.97.0 h1:iF3nTfThbiOwz7o5Pocn0dDnDoffd18ijDuf6Mwzi1s=
go.opentelemetry.io/collector/semconv v0.96.0/go.mod h1:zOm/U3pgMIWcvrcnPbR9Xx2HinoXj46ERMK8PUV9wrs= go.opentelemetry.io/collector/semconv v0.97.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
@ -1118,8 +1118,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1161,12 +1161,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=

View file

@ -39,7 +39,8 @@ type Label struct {
} }
func (ls Labels) String() string { func (ls Labels) String() string {
var b bytes.Buffer var bytea [1024]byte // On stack to avoid memory allocation while building the output.
b := bytes.NewBuffer(bytea[:0])
b.WriteByte('{') b.WriteByte('{')
i := 0 i := 0
@ -50,7 +51,7 @@ func (ls Labels) String() string {
} }
b.WriteString(l.Name) b.WriteString(l.Name)
b.WriteByte('=') b.WriteByte('=')
b.WriteString(strconv.Quote(l.Value)) b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value))
i++ i++
}) })
b.WriteByte('}') b.WriteByte('}')
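
The rewritten String() above avoids heap allocations for typical label sets: the buffer's backing array lives on the stack, and strconv.AppendQuote writes the quoted value into the buffer's spare capacity instead of allocating an intermediate string as strconv.Quote would. A minimal self-contained sketch of the same pattern (standalone names are illustrative, not part of the labels package; AvailableBuffer requires Go 1.21+):

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// formatPairs mimics the allocation-avoiding structure of Labels.String():
// a fixed array on the stack backs the buffer, and AppendQuote appends the
// quoted value directly into the buffer's unused capacity.
func formatPairs(pairs [][2]string) string {
	var bytea [1024]byte // on stack; no heap allocation for small outputs
	b := bytes.NewBuffer(bytea[:0])
	b.WriteByte('{')
	for i, p := range pairs {
		if i > 0 {
			b.WriteString(", ")
		}
		b.WriteString(p[0])
		b.WriteByte('=')
		b.Write(strconv.AppendQuote(b.AvailableBuffer(), p[1]))
	}
	b.WriteByte('}')
	return b.String()
}

func main() {
	fmt.Println(formatPairs([][2]string{{"job", "node"}, {"status", "500"}}))
	// Output: {job="node", status="500"}
}
```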

View file

@ -363,13 +363,11 @@ func Compare(a, b Labels) int {
// Now we know that there is some difference before the end of a and b. // Now we know that there is some difference before the end of a and b.
// Go back through the fields and find which field that difference is in. // Go back through the fields and find which field that difference is in.
firstCharDifferent := i firstCharDifferent, i := i, 0
for i = 0; ; { size, nextI := decodeSize(a.data, i)
size, nextI := decodeSize(a.data, i) for nextI+size <= firstCharDifferent {
if nextI+size > firstCharDifferent {
break
}
i = nextI + size i = nextI + size
size, nextI = decodeSize(a.data, i)
} }
// Difference is inside this entry. // Difference is inside this entry.
aStr, _ := decodeString(a.data, i) aStr, _ := decodeString(a.data, i)
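
The rewritten loop above hoists the size decoding so each entry is decoded exactly once while scanning forward to the entry that contains the first differing byte. A hypothetical sketch of the same walk, assuming a simplified single-byte length prefix (the real decodeSize uses a varint-style encoding):

```go
// entryStart returns the offset of the size-prefixed entry that contains
// the byte offset firstCharDifferent. Sizes here are a single byte purely
// for illustration.
func entryStart(data string, firstCharDifferent int) int {
	i := 0
	size, nextI := int(data[i]), i+1
	for nextI+size <= firstCharDifferent {
		i = nextI + size                // skip past the current entry
		size, nextI = int(data[i]), i+1 // decode the next entry's size
	}
	return i
}
```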

View file

@ -16,6 +16,7 @@ package labels
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/http"
"strings" "strings"
"testing" "testing"
@ -25,24 +26,31 @@ import (
func TestLabels_String(t *testing.T) { func TestLabels_String(t *testing.T) {
cases := []struct { cases := []struct {
lables Labels labels Labels
expected string expected string
}{ }{
{ {
lables: FromStrings("t1", "t1", "t2", "t2"), labels: FromStrings("t1", "t1", "t2", "t2"),
expected: "{t1=\"t1\", t2=\"t2\"}", expected: "{t1=\"t1\", t2=\"t2\"}",
}, },
{ {
lables: Labels{}, labels: Labels{},
expected: "{}", expected: "{}",
}, },
} }
for _, c := range cases { for _, c := range cases {
str := c.lables.String() str := c.labels.String()
require.Equal(t, c.expected, str) require.Equal(t, c.expected, str)
} }
} }
func BenchmarkString(b *testing.B) {
ls := New(benchmarkLabels...)
for i := 0; i < b.N; i++ {
_ = ls.String()
}
}
func TestLabels_MatchLabels(t *testing.T) { func TestLabels_MatchLabels(t *testing.T) {
labels := FromStrings( labels := FromStrings(
"__name__", "ALERTS", "__name__", "ALERTS",
@ -529,6 +537,16 @@ var comparisonBenchmarkScenarios = []struct {
FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
}, },
{
"real long equal",
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
},
{
"real long different end",
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
},
} }
func BenchmarkLabels_Equals(b *testing.B) { func BenchmarkLabels_Equals(b *testing.B) {
@ -789,24 +807,24 @@ func BenchmarkLabels_Hash(b *testing.B) {
} }
} }
func BenchmarkBuilder(b *testing.B) { var benchmarkLabels = []Label{
m := []Label{ {"job", "node"},
{"job", "node"}, {"instance", "123.123.1.211:9090"},
{"instance", "123.123.1.211:9090"}, {"path", "/api/v1/namespaces/<namespace>/deployments/<name>"},
{"path", "/api/v1/namespaces/<namespace>/deployments/<name>"}, {"method", http.MethodGet},
{"method", "GET"}, {"namespace", "system"},
{"namespace", "system"}, {"status", "500"},
{"status", "500"}, {"prometheus", "prometheus-core-1"},
{"prometheus", "prometheus-core-1"}, {"datacenter", "eu-west-1"},
{"datacenter", "eu-west-1"}, {"pod_name", "abcdef-99999-defee"},
{"pod_name", "abcdef-99999-defee"}, }
}
func BenchmarkBuilder(b *testing.B) {
var l Labels var l Labels
builder := NewBuilder(EmptyLabels()) builder := NewBuilder(EmptyLabels())
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
builder.Reset(EmptyLabels()) builder.Reset(EmptyLabels())
for _, l := range m { for _, l := range benchmarkLabels {
builder.Set(l.Name, l.Value) builder.Set(l.Name, l.Value)
} }
l = builder.Labels() l = builder.Labels()
@ -815,18 +833,7 @@ func BenchmarkBuilder(b *testing.B) {
} }
func BenchmarkLabels_Copy(b *testing.B) { func BenchmarkLabels_Copy(b *testing.B) {
m := map[string]string{ l := New(benchmarkLabels...)
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
l := FromMap(m)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
l = l.Copy() l = l.Copy()

View file

@ -118,3 +118,30 @@ func (m *Matcher) GetRegexString() string {
} }
return m.re.GetRegexString() return m.re.GetRegexString()
} }
// SetMatches returns a set of equality matchers for the current regex matcher, if possible.
// For example, the regexp `a(b|f)` returns "ab" and "af".
// Returns nil if the regexp can't be replaced by equality matchers alone.
func (m *Matcher) SetMatches() []string {
if m.re == nil {
return nil
}
return m.re.SetMatches()
}
// Prefix returns the required prefix of the value to match, if possible.
// It will be empty if it's an equality matcher or if the prefix can't be determined.
func (m *Matcher) Prefix() string {
if m.re == nil {
return ""
}
return m.re.prefix
}
// IsRegexOptimized returns whether the regex is optimized.
func (m *Matcher) IsRegexOptimized() bool {
if m.re == nil {
return false
}
return m.re.IsOptimized()
}
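
Together these accessors let callers probe a matcher for fast paths before falling back to full regex evaluation. A hedged usage sketch (label name and values are illustrative):

```go
m, err := labels.NewMatcher(labels.MatchRegexp, "env", "prod|staging")
if err != nil {
	panic(err)
}
_ = m.SetMatches()       // ["prod" "staging"]: the regex reduces to equality checks
_ = m.Prefix()           // "" here; a pattern like `api.+` would report "api"
_ = m.IsRegexOptimized() // true, since a fast path applies
```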

View file

@ -14,13 +14,14 @@
package labels package labels
import ( import (
"fmt"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher { func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher {
m, err := NewMatcher(mType, "", value) m, err := NewMatcher(mType, "test_label_name", value)
require.NoError(t, err) require.NoError(t, err)
return m return m
} }
@ -81,6 +82,21 @@ func TestMatcher(t *testing.T) {
value: "foo-bar", value: "foo-bar",
match: false, match: false,
}, },
{
matcher: mustNewMatcher(t, MatchRegexp, "$*bar"),
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "bar^+"),
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "$+bar"),
value: "foo-bar",
match: false,
},
} }
for _, test := range tests { for _, test := range tests {
@ -118,6 +134,82 @@ func TestInverse(t *testing.T) {
} }
} }
func TestPrefix(t *testing.T) {
for i, tc := range []struct {
matcher *Matcher
prefix string
}{
{
matcher: mustNewMatcher(t, MatchEqual, "abc"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchNotEqual, "abc"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abcd|abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchNotRegexp, "abcd|abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc(def|ghj)|ab|a."),
prefix: "a",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "foo.+bar|foo.*baz"),
prefix: "foo",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc|.*"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc|def"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, ".+def"),
prefix: "",
},
} {
t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) {
require.Equal(t, tc.prefix, tc.matcher.Prefix())
})
}
}
func TestIsRegexOptimized(t *testing.T) {
for i, tc := range []struct {
matcher *Matcher
isRegexOptimized bool
}{
{
matcher: mustNewMatcher(t, MatchEqual, "abc"),
isRegexOptimized: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "."),
isRegexOptimized: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc.+"),
isRegexOptimized: true,
},
} {
t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) {
require.Equal(t, tc.isRegexOptimized, tc.matcher.IsRegexOptimized())
})
}
}
func BenchmarkMatchType_String(b *testing.B) { func BenchmarkMatchType_String(b *testing.B) {
for i := 0; i <= b.N; i++ { for i := 0; i <= b.N; i++ {
_ = MatchType(i % int(MatchNotRegexp+1)).String() _ = MatchType(i % int(MatchNotRegexp+1)).String()

View file

@ -14,73 +14,348 @@
package labels package labels
import ( import (
"slices"
"strings" "strings"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/grafana/regexp/syntax" "github.com/grafana/regexp/syntax"
) )
type FastRegexMatcher struct { const (
re *regexp.Regexp maxSetMatches = 256
prefix string
suffix string
contains string
// shortcut for literals // The minimum number of alternate values a regex should have to trigger
literal bool // the optimization done by optimizeEqualStringMatchers() and so use a map
value string // to match values instead of iterating over a list. This value has
// been computed running BenchmarkOptimizeEqualStringMatchers.
minEqualMultiStringMatcherMapThreshold = 16
)
type FastRegexMatcher struct {
// Under some conditions, re is nil because the expression is never parsed.
// We store the original string to be able to return it in GetRegexString().
reString string
re *regexp.Regexp
setMatches []string
stringMatcher StringMatcher
prefix string
suffix string
contains string
// matchString is the "compiled" function to run by MatchString().
matchString func(string) bool
} }
func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
if isLiteral(v) {
return &FastRegexMatcher{literal: true, value: v}, nil
}
re, err := regexp.Compile("^(?:" + v + ")$")
if err != nil {
return nil, err
}
parsed, err := syntax.Parse(v, syntax.Perl)
if err != nil {
return nil, err
}
m := &FastRegexMatcher{ m := &FastRegexMatcher{
re: re, reString: v,
} }
if parsed.Op == syntax.OpConcat { m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v)
m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) if m.stringMatcher != nil {
// If we already have a string matcher, we don't need to parse the regex
// or compile the matchString function. This also avoids the behavior in
// compileMatchStringFunction where it prefers to use setMatches when
// available, even if the string matcher is faster.
m.matchString = m.stringMatcher.Matches
} else {
parsed, err := syntax.Parse(v, syntax.Perl)
if err != nil {
return nil, err
}
// Simplify the syntax tree to run faster.
parsed = parsed.Simplify()
m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$")
if err != nil {
return nil, err
}
if parsed.Op == syntax.OpConcat {
m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
}
if matches, caseSensitive := findSetMatches(parsed); caseSensitive {
m.setMatches = matches
}
m.stringMatcher = stringMatcherFromRegexp(parsed)
m.matchString = m.compileMatchStringFunction()
} }
return m, nil return m, nil
} }
// compileMatchStringFunction returns the function to be run by MatchString().
func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
// If the only optimization available is the string matcher, then we can just run it.
if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil {
return m.stringMatcher.Matches
}
return func(s string) bool {
if len(m.setMatches) != 0 {
for _, match := range m.setMatches {
if match == s {
return true
}
}
return false
}
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
return false
}
if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
return false
}
if m.contains != "" && !strings.Contains(s, m.contains) {
return false
}
if m.stringMatcher != nil {
return m.stringMatcher.Matches(s)
}
return m.re.MatchString(s)
}
}
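
The compiled function tries the cheapest evidence first: fixed set matches, then the prefix/suffix/contains guards, then the string matcher, and only as a last resort the compiled regex. A short sketch using the constructor from this file (values illustrative):

```go
m, err := NewFastRegexMatcher("foo(bar|baz)")
if err != nil {
	panic(err)
}
_ = m.MatchString("foobaz") // true, answered by the set-matches scan; the regex engine never runs
_ = m.MatchString("foobug") // false, for the same reason
_ = m.IsOptimized()         // true
```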
// IsOptimized returns true if any fast-path optimization is applied to the
// regex matcher.
func (m *FastRegexMatcher) IsOptimized() bool {
return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != ""
}
// findSetMatches extract equality matches from a regexp.
// Returns nil if we can't replace the regexp by only equality matchers or the regexp contains
// a mix of case sensitive and case insensitive matchers.
func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) {
clearBeginEndText(re)
return findSetMatchesInternal(re, "")
}
func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) {
switch re.Op {
case syntax.OpBeginText:
// Correctly handling the begin text operator inside a regex is tricky,
// so in this case we fall back to the regex engine.
return nil, false
case syntax.OpEndText:
// Correctly handling the end text operator inside a regex is tricky,
// so in this case we fall back to the regex engine.
return nil, false
case syntax.OpLiteral:
return []string{base + string(re.Rune)}, isCaseSensitive(re)
case syntax.OpEmptyMatch:
if base != "" {
return []string{base}, isCaseSensitive(re)
}
case syntax.OpAlternate:
return findSetMatchesFromAlternate(re, base)
case syntax.OpCapture:
clearCapture(re)
return findSetMatchesInternal(re, base)
case syntax.OpConcat:
return findSetMatchesFromConcat(re, base)
case syntax.OpCharClass:
if len(re.Rune)%2 != 0 {
return nil, false
}
var matches []string
var totalSet int
for i := 0; i+1 < len(re.Rune); i += 2 {
totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1
}
// Limit the total number of characters that can be used to create matches.
// In some cases, like the negation [^0-9], a lot of possibilities exist, and that
// can create thousands of possible matches, at which point we're better off using the regexp engine.
if totalSet > maxSetMatches {
return nil, false
}
for i := 0; i+1 < len(re.Rune); i += 2 {
lo, hi := re.Rune[i], re.Rune[i+1]
for c := lo; c <= hi; c++ {
matches = append(matches, base+string(c))
}
}
return matches, isCaseSensitive(re)
default:
return nil, false
}
return nil, false
}
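
For instance, an alternation of literals expands into its concrete values, while patterns mixing case sensitive and case insensitive branches return nil. A sketch inside this package:

```go
parsed, err := syntax.Parse("foo(bar|baz)", syntax.Perl)
if err != nil {
	panic(err)
}
matches, caseSensitive := findSetMatches(parsed)
// matches == []string{"foobar", "foobaz"}, caseSensitive == true
_, _ = matches, caseSensitive
```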
func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
if len(re.Sub) == 0 {
return nil, false
}
clearCapture(re.Sub...)
matches = []string{base}
for i := 0; i < len(re.Sub); i++ {
var newMatches []string
for j, b := range matches {
m, caseSensitive := findSetMatchesInternal(re.Sub[i], b)
if m == nil {
return nil, false
}
if tooManyMatches(newMatches, m...) {
return nil, false
}
// All matches must have the same case sensitivity. If it's the first set of matches
// returned, we store its sensitivity as the expected case, and then we'll check all
// other ones.
if i == 0 && j == 0 {
matchesCaseSensitive = caseSensitive
}
if matchesCaseSensitive != caseSensitive {
return nil, false
}
newMatches = append(newMatches, m...)
}
matches = newMatches
}
return matches, matchesCaseSensitive
}
func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
for i, sub := range re.Sub {
found, caseSensitive := findSetMatchesInternal(sub, base)
if found == nil {
return nil, false
}
if tooManyMatches(matches, found...) {
return nil, false
}
// All matches must have the same case sensitivity. If it's the first set of matches
// returned, we store its sensitivity as the expected case, and then we'll check all
// other ones.
if i == 0 {
matchesCaseSensitive = caseSensitive
}
if matchesCaseSensitive != caseSensitive {
return nil, false
}
matches = append(matches, found...)
}
return matches, matchesCaseSensitive
}
// clearCapture removes capture operations, as they are not used for matching.
func clearCapture(regs ...*syntax.Regexp) {
for _, r := range regs {
// Iterate on the regexp because capture groups could be nested.
for r.Op == syntax.OpCapture {
*r = *r.Sub[0]
}
}
}
// clearBeginEndText removes the begin and end text operators from the regexp. Prometheus regexps are anchored to the beginning and end of the string.
func clearBeginEndText(re *syntax.Regexp) {
// Do not clear begin/end text from an alternate operator because it could
// change the actual regexp properties.
if re.Op == syntax.OpAlternate {
return
}
if len(re.Sub) == 0 {
return
}
if len(re.Sub) == 1 {
if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText {
// We need to remove this element. Since it's the only one, we convert it into a matcher of an empty string.
// OpEmptyMatch is regexp's nop operator.
re.Op = syntax.OpEmptyMatch
re.Sub = nil
return
}
}
if re.Sub[0].Op == syntax.OpBeginText {
re.Sub = re.Sub[1:]
}
if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText {
re.Sub = re.Sub[:len(re.Sub)-1]
}
}
// isCaseInsensitive tells whether a regexp is case insensitive.
// The flag should be checked at each level of the syntax tree.
func isCaseInsensitive(reg *syntax.Regexp) bool {
return (reg.Flags & syntax.FoldCase) != 0
}
// isCaseSensitive tells whether a regexp is case sensitive.
// The flag should be checked at each level of the syntax tree.
func isCaseSensitive(reg *syntax.Regexp) bool {
return !isCaseInsensitive(reg)
}
// tooManyMatches guards against creating too many set matches.
func tooManyMatches(matches []string, added ...string) bool {
return len(matches)+len(added) > maxSetMatches
}
func (m *FastRegexMatcher) MatchString(s string) bool { func (m *FastRegexMatcher) MatchString(s string) bool {
if m.literal { return m.matchString(s)
return s == m.value }
}
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { func (m *FastRegexMatcher) SetMatches() []string {
return false // IMPORTANT: always return a copy, otherwise if the caller manipulates this slice it will
} // also get manipulated in the cached FastRegexMatcher instance.
if m.suffix != "" && !strings.HasSuffix(s, m.suffix) { return slices.Clone(m.setMatches)
return false
}
if m.contains != "" && !strings.Contains(s, m.contains) {
return false
}
return m.re.MatchString(s)
} }
func (m *FastRegexMatcher) GetRegexString() string { func (m *FastRegexMatcher) GetRegexString() string {
if m.literal { return m.reString
return m.value
}
return m.re.String()
} }
func isLiteral(re string) bool { // optimizeAlternatingLiterals optimizes a regex of the form
return regexp.QuoteMeta(re) == re //
// `literal1|literal2|literal3|...`
//
// This function returns an optimized StringMatcher or nil if the regex
// cannot be optimized in this way, and a list of setMatches up to maxSetMatches.
func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
if len(s) == 0 {
return emptyStringMatcher{}, nil
}
estimatedAlternates := strings.Count(s, "|") + 1
// If there are no alternates, check if the string is a literal
if estimatedAlternates == 1 {
if regexp.QuoteMeta(s) == s {
return &equalStringMatcher{s: s, caseSensitive: true}, []string{s}
}
return nil, nil
}
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)
for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
// Split the string into the next literal and the remainder
subMatch := s[:end]
s = s[end+1:]
// break if any of the submatches are not literals
if regexp.QuoteMeta(subMatch) != subMatch {
return nil, nil
}
multiMatcher.add(subMatch)
}
// break if the remainder is not a literal
if regexp.QuoteMeta(s) != s {
return nil, nil
}
multiMatcher.add(s)
return multiMatcher, multiMatcher.setMatches()
} }
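
A usage sketch of the alternating-literals fast path (values illustrative):

```go
sm, set := optimizeAlternatingLiterals("200|404|500")
// Three alternates is below minEqualMultiStringMatcherMapThreshold, so sm is
// slice-backed; set == []string{"200", "404", "500"}.
_ = sm.Matches("404") // true
_ = sm.Matches("301") // false
_ = set

sm, set = optimizeAlternatingLiterals("20.|404")
// "20." is not a literal, so sm == nil and set == nil: the caller falls
// back to the full regexp parsing path in NewFastRegexMatcher.
```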
// optimizeConcatRegex returns literal prefix/suffix text that can be safely // optimizeConcatRegex returns literal prefix/suffix text that can be safely
@ -123,3 +398,540 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
return return
} }
// StringMatcher is a matcher that matches a string in place of a regular expression.
type StringMatcher interface {
Matches(s string) bool
}
// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher.
// It returns nil if the regexp is not supported.
func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
clearBeginEndText(re)
m := stringMatcherFromRegexpInternal(re)
m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
return m
}
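
A sketch of what this yields for a common pattern shape (inside this package; the concrete matcher type is an implementation detail):

```go
parsed, err := syntax.Parse("(foo|bar).*", syntax.Perl)
if err != nil {
	panic(err)
}
sm := stringMatcherFromRegexp(parsed)
// sm acts as a prefix check for "foo" or "bar" with anything (newline-free)
// after it, without ever running the regexp engine. A nil result would mean
// no string-matcher optimization applies.
_ = sm.Matches("barista") // true
_ = sm.Matches("qux")     // false
```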
func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
clearCapture(re)
switch re.Op {
case syntax.OpBeginText:
// Correctly handling the begin text operator inside a regex is tricky,
// so in this case we fallback to the regex engine.
return nil
case syntax.OpEndText:
// Correctly handling the end text operator inside a regex is tricky,
// so in this case we fallback to the regex engine.
return nil
case syntax.OpPlus:
if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
return nil
}
return &anyNonEmptyStringMatcher{
matchNL: re.Sub[0].Op == syntax.OpAnyChar,
}
case syntax.OpStar:
if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
return nil
}
// If the newline is valid, then this matcher literally matches any string (even an empty one).
if re.Sub[0].Op == syntax.OpAnyChar {
return trueMatcher{}
}
// Any string is fine (including an empty one), as long as it doesn't contain any newline.
return anyStringWithoutNewlineMatcher{}
case syntax.OpQuest:
// Only optimize for ".?".
if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) {
return nil
}
return &zeroOrOneCharacterStringMatcher{
matchNL: re.Sub[0].Op == syntax.OpAnyChar,
}
case syntax.OpEmptyMatch:
return emptyStringMatcher{}
case syntax.OpLiteral:
return &equalStringMatcher{
s: string(re.Rune),
caseSensitive: !isCaseInsensitive(re),
}
case syntax.OpAlternate:
or := make([]StringMatcher, 0, len(re.Sub))
for _, sub := range re.Sub {
m := stringMatcherFromRegexpInternal(sub)
if m == nil {
return nil
}
or = append(or, m)
}
return orStringMatcher(or)
case syntax.OpConcat:
clearCapture(re.Sub...)
if len(re.Sub) == 0 {
return emptyStringMatcher{}
}
if len(re.Sub) == 1 {
return stringMatcherFromRegexpInternal(re.Sub[0])
}
var left, right StringMatcher
// Let's try to find if there's a first and last any matchers.
if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest {
left = stringMatcherFromRegexpInternal(re.Sub[0])
if left == nil {
return nil
}
re.Sub = re.Sub[1:]
}
if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest {
right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1])
if right == nil {
return nil
}
re.Sub = re.Sub[:len(re.Sub)-1]
}
matches, matchesCaseSensitive := findSetMatchesInternal(re, "")
if len(matches) == 0 && len(re.Sub) == 2 {
// We have not found fixed set matches. We look for other known cases that
// we can optimize.
switch {
// Prefix is literal.
case right == nil && re.Sub[0].Op == syntax.OpLiteral:
right = stringMatcherFromRegexpInternal(re.Sub[1])
if right != nil {
matches = []string{string(re.Sub[0].Rune)}
matchesCaseSensitive = !isCaseInsensitive(re.Sub[0])
}
// Suffix is literal.
case left == nil && re.Sub[1].Op == syntax.OpLiteral:
left = stringMatcherFromRegexpInternal(re.Sub[0])
if left != nil {
matches = []string{string(re.Sub[1].Rune)}
matchesCaseSensitive = !isCaseInsensitive(re.Sub[1])
}
}
}
// Ensure we've found some literals to match (optionally with a left and/or right matcher).
// If not, then this optimization doesn't trigger.
if len(matches) == 0 {
return nil
}
// Use the right (and best) matcher based on what we've found.
switch {
// No left and right matchers (only fixed set matches).
case left == nil && right == nil:
// If there are no any-matchers on either side, it's a concat of literals.
or := make([]StringMatcher, 0, len(matches))
for _, match := range matches {
or = append(or, &equalStringMatcher{
s: match,
caseSensitive: matchesCaseSensitive,
})
}
return orStringMatcher(or)
// Right matcher with 1 fixed set match.
case left == nil && len(matches) == 1:
return &literalPrefixStringMatcher{
prefix: matches[0],
prefixCaseSensitive: matchesCaseSensitive,
right: right,
}
// Left matcher with 1 fixed set match.
case right == nil && len(matches) == 1:
return &literalSuffixStringMatcher{
left: left,
suffix: matches[0],
suffixCaseSensitive: matchesCaseSensitive,
}
// We found literals in the middle. We can trigger the fast path only if
// the matches are case sensitive, because containsStringMatcher doesn't
// support case-insensitive matching.
case matchesCaseSensitive:
return &containsStringMatcher{
substrings: matches,
left: left,
right: right,
}
}
}
return nil
}
// containsStringMatcher matches a string if it contains any of the substrings.
// If left and right are not nil, it's a contains operation where left and right must match.
// If left is nil, it's a hasPrefix operation and right must match.
// Finally, if right is nil it's a hasSuffix operation and left must match.
type containsStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
// At least one of these strings must match in the "middle", between left and right matchers.
substrings []string
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *containsStringMatcher) Matches(s string) bool {
for _, substr := range m.substrings {
switch {
case m.right != nil && m.left != nil:
searchStartPos := 0
for {
pos := strings.Index(s[searchStartPos:], substr)
if pos < 0 {
break
}
// Since we started searching from searchStartPos, we have to add that offset
// to get the actual position of the substring inside the text.
pos += searchStartPos
// If both the left and right matchers match, then we can stop searching because
// we've found a match.
if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) {
return true
}
// Continue searching for another occurrence of the substring inside the text.
searchStartPos = pos + 1
}
case m.left != nil:
// If we have to check for characters on the left then we need to match a suffix.
if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) {
return true
}
case m.right != nil:
if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) {
return true
}
}
}
return false
}
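// Illustrative sketch (not in the upstream file): a pattern like `.*err.*`
// decomposes into "any string" matchers on both sides with "err" as the
// middle literal. The concrete values below are assumptions for demonstration.
func exampleContainsStringMatcher() {
	m := &containsStringMatcher{
		left:       anyStringWithoutNewlineMatcher{},
		substrings: []string{"err"},
		right:      anyStringWithoutNewlineMatcher{},
	}
	_ = m.Matches("no error here") // true: "err" found, both sides match
	_ = m.Matches("all good")      // false: "err" is absent
}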
// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
type literalPrefixStringMatcher struct {
prefix string
prefixCaseSensitive bool
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *literalPrefixStringMatcher) Matches(s string) bool {
// Ensure the prefix matches.
if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
return false
}
if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
return false
}
// Ensure the right side matches.
return m.right.Matches(s[len(m.prefix):])
}
// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher.
type literalSuffixStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
suffix string
suffixCaseSensitive bool
}
func (m *literalSuffixStringMatcher) Matches(s string) bool {
// Ensure the suffix matches.
if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) {
return false
}
if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) {
return false
}
// Ensure the left side matches.
return m.left.Matches(s[:len(s)-len(m.suffix)])
}
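// Illustrative sketch (not in the upstream file): `foo.+` and `.+foo` map to
// the prefix/suffix matchers above, with anyNonEmptyStringMatcher standing in
// for the `.+` side.
func exampleLiteralAffixMatchers() {
	p := &literalPrefixStringMatcher{prefix: "foo", prefixCaseSensitive: true, right: &anyNonEmptyStringMatcher{}}
	_ = p.Matches("foobar") // true: literal prefix plus a non-empty remainder
	_ = p.Matches("foo")    // false: the `.+` side requires a non-empty remainder

	s := &literalSuffixStringMatcher{left: &anyNonEmptyStringMatcher{}, suffix: "foo", suffixCaseSensitive: true}
	_ = s.Matches("barfoo") // true
}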
// emptyStringMatcher matches an empty string.
type emptyStringMatcher struct{}
func (m emptyStringMatcher) Matches(s string) bool {
return len(s) == 0
}
// orStringMatcher matches any of the sub-matchers.
type orStringMatcher []StringMatcher
func (m orStringMatcher) Matches(s string) bool {
for _, matcher := range m {
if matcher.Matches(s) {
return true
}
}
return false
}
// equalStringMatcher matches a string exactly and supports case-insensitive matching.
type equalStringMatcher struct {
s string
caseSensitive bool
}
func (m *equalStringMatcher) Matches(s string) bool {
if m.caseSensitive {
return m.s == s
}
return strings.EqualFold(m.s, s)
}
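// Illustrative sketch (not in the upstream file): case-insensitive equality
// goes through strings.EqualFold, so the input never has to be lowercased.
func exampleEqualStringMatcher() {
	m := &equalStringMatcher{s: "Prometheus", caseSensitive: false}
	_ = m.Matches("PROMETHEUS") // true
	_ = m.Matches("prometheu")  // false
}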
type multiStringMatcherBuilder interface {
StringMatcher
add(s string)
setMatches() []string
}
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
// If the estimated size is low enough, it's faster to use a slice instead of a map.
if estimatedSize < minEqualMultiStringMatcherMapThreshold {
return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
}
return &equalMultiStringMapMatcher{
values: make(map[string]struct{}, estimatedSize),
caseSensitive: caseSensitive,
}
}
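// Illustrative sketch (not in the upstream file): for a handful of values a
// linear scan over a slice beats hashing, so a small estimated size likely
// yields the slice-backed matcher; past the threshold the map takes over.
func exampleEqualMultiStringMatcher() {
	m := newEqualMultiStringMatcher(true, 3)
	m.add("up")
	m.add("down")
	m.add("unknown")
	_ = m.Matches("down") // true
	_ = m.setMatches()    // returns the stored values for set-matches extraction
}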
// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values.
type equalMultiStringSliceMatcher struct {
values []string
caseSensitive bool
}
func (m *equalMultiStringSliceMatcher) add(s string) {
m.values = append(m.values, s)
}
func (m *equalMultiStringSliceMatcher) setMatches() []string {
return m.values
}
func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
if m.caseSensitive {
for _, v := range m.values {
if s == v {
return true
}
}
} else {
for _, v := range m.values {
if strings.EqualFold(s, v) {
return true
}
}
}
return false
}
// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
type equalMultiStringMapMatcher struct {
// values contains values to match a string against. If the matching is case insensitive,
// the values here must be lowercase.
values map[string]struct{}
caseSensitive bool
}
func (m *equalMultiStringMapMatcher) add(s string) {
if !m.caseSensitive {
s = strings.ToLower(s)
}
m.values[s] = struct{}{}
}
func (m *equalMultiStringMapMatcher) setMatches() []string {
if len(m.values) >= maxSetMatches {
return nil
}
matches := make([]string, 0, len(m.values))
for s := range m.values {
matches = append(matches, s)
}
return matches
}
func (m *equalMultiStringMapMatcher) Matches(s string) bool {
if !m.caseSensitive {
s = strings.ToLower(s)
}
_, ok := m.values[s]
return ok
}
// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
// (including an empty one) as long as it doesn't contain any newline character.
type anyStringWithoutNewlineMatcher struct{}
func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
// We need to make sure it doesn't contain a newline. Since the newline is
// an ASCII character, we can use strings.IndexByte().
return strings.IndexByte(s, '\n') == -1
}
// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string.
type anyNonEmptyStringMatcher struct {
matchNL bool
}
func (m *anyNonEmptyStringMatcher) Matches(s string) bool {
if m.matchNL {
// It's OK if the string contains a newline so we just need to make
// sure it's non-empty.
return len(s) > 0
}
// We need to make sure it's non-empty and doesn't contain a newline.
// Since the newline is an ASCII character, we can use strings.IndexByte().
return len(s) > 0 && strings.IndexByte(s, '\n') == -1
}
// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence
// of any character. The newline character is matched only if matchNL is set to true.
type zeroOrOneCharacterStringMatcher struct {
matchNL bool
}
func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
// Zero or one.
if len(s) > 1 {
return false
}
// No need to check for the newline if the string is empty or matching a newline is OK.
if m.matchNL || len(s) == 0 {
return true
}
return s[0] != '\n'
}
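// Illustrative sketch (not in the upstream file): `.?` yields this matcher;
// with matchNL=false a lone newline is rejected.
func exampleZeroOrOneCharacterMatcher() {
	m := &zeroOrOneCharacterStringMatcher{matchNL: false}
	_ = m.Matches("")   // true: zero characters
	_ = m.Matches("a")  // true: exactly one character
	_ = m.Matches("ab") // false: more than one character
	_ = m.Matches("\n") // false: newline is not allowed
}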
// trueMatcher is a stringMatcher which matches any string (always returns true).
type trueMatcher struct{}
func (m trueMatcher) Matches(_ string) bool {
return true
}
// optimizeEqualStringMatchers optimizes the specific case where the input StringMatcher
// is an alternation (orStringMatcher) of strings checked for equality (equalStringMatcher).
// In that case, when there are many strings to match against, it's faster to use a map
// instead of iterating over the list of strings.
func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
var (
caseSensitive bool
caseSensitiveSet bool
numValues int
)
// Analyse the input StringMatcher to count the number of occurrences
// and ensure all of them have the same case sensitivity.
analyseCallback := func(matcher *equalStringMatcher) bool {
// Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
return false
} else if !caseSensitiveSet {
caseSensitive = matcher.caseSensitive
caseSensitiveSet = true
}
numValues++
return true
}
if !findEqualStringMatchers(input, analyseCallback) {
return input
}
// If the number of values found is less than the threshold, then we should skip the optimization.
if numValues < threshold {
return input
}
// Walk the input StringMatcher again, this time extracting and storing all values.
// We can skip the case sensitivity check because we've already checked it:
// if the code reaches this point, all matchers have the same case sensitivity.
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)
// Ignore the return value because we already iterated over the input StringMatcher
// and it was all good.
findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
multiMatcher.add(matcher.s)
return true
})
return multiMatcher
}
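// Illustrative sketch (not in the upstream file): an alternation such as
// `eu-west-1|eu-west-2|...` first parses into an orStringMatcher of
// equalStringMatchers; once the value count reaches the threshold (and case
// sensitivity is uniform), it is swapped for a single multi-string matcher.
func exampleOptimizeEqualStringMatchers() {
	or := orStringMatcher{
		&equalStringMatcher{s: "eu-west-1", caseSensitive: true},
		&equalStringMatcher{s: "eu-west-2", caseSensitive: true},
	}
	opt := optimizeEqualStringMatchers(or, 2) // threshold met: returns a multi matcher
	_ = opt.Matches("eu-west-2")              // true, via a slice/map lookup rather than a scan
}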
// findEqualStringMatchers analyzes the input StringMatcher and calls the callback for each
// equalStringMatcher found. Returns true if and only if the input StringMatcher is *only*
// composed of an alternation of equalStringMatchers.
func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
orInput, ok := input.(orStringMatcher)
if !ok {
return false
}
for _, m := range orInput {
switch casted := m.(type) {
case orStringMatcher:
if !findEqualStringMatchers(m, callback) {
return false
}
case *equalStringMatcher:
if !callback(casted) {
return false
}
default:
// It's not an equal string matcher, so we have to stop searching
// because this optimization can't be applied.
return false
}
}
return true
}
func hasPrefixCaseInsensitive(s, prefix string) bool {
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
func hasSuffixCaseInsensitive(s, suffix string) bool {
return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
}
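// Illustrative sketch (not in the upstream file): both helpers slice first and
// fold-compare, so no lowercased copy of the input is ever allocated.
func exampleCaseInsensitiveAffixHelpers() {
	_ = hasPrefixCaseInsensitive("PromQL", "prom") // true
	_ = hasSuffixCaseInsensitive("PromQL", "QL")   // true
}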

File diff suppressed because one or more lines are too long

View file

@ -6,4 +6,4 @@ groups:
labels: labels:
instance: localhost instance: localhost
annotation: annotation:
summary: annonations is written without s above summary: annotations is written without s above

View file

@ -595,7 +595,7 @@ func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet {
} }
func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error { func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte) error {
req, err := http.NewRequest("POST", url, bytes.NewReader(b)) req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b))
if err != nil { if err != nil {
return err return err
} }

View file

@ -115,6 +115,12 @@ func (e ErrStorage) Error() string {
return e.Err.Error() return e.Err.Error()
} }
// QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked.
type QueryEngine interface {
NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
}
// QueryLogger is an interface that can be used to log all the queries logged // QueryLogger is an interface that can be used to log all the queries logged
// by the engine. // by the engine.
type QueryLogger interface { type QueryLogger interface {
@ -1061,8 +1067,6 @@ func (ev *evaluator) Eval(expr parser.Expr) (v parser.Value, ws annotations.Anno
// EvalSeriesHelper stores extra information about a series. // EvalSeriesHelper stores extra information about a series.
type EvalSeriesHelper struct { type EvalSeriesHelper struct {
// The grouping key used by aggregation.
groupingKey uint64
// Used to map left-hand to right-hand in binary operations. // Used to map left-hand to right-hand in binary operations.
signature string signature string
} }
@ -1075,8 +1079,6 @@ type EvalNodeHelper struct {
Out Vector Out Vector
// Caches. // Caches.
// label_*.
Dmn map[uint64]labels.Labels
// funcHistogramQuantile for classic histograms. // funcHistogramQuantile for classic histograms.
signatureToMetricWithBuckets map[string]*metricWithBuckets signatureToMetricWithBuckets map[string]*metricWithBuckets
@ -1196,6 +1198,9 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
if prepSeries != nil { if prepSeries != nil {
bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si])
} }
// Don't add histogram size here because we only
// copy the pointer above, not the whole
// histogram.
ev.currentSamples++ ev.currentSamples++
if ev.currentSamples > ev.maxSamples { if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
@ -1221,7 +1226,6 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
if ev.currentSamples > ev.maxSamples { if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.samplesStats.UpdatePeak(ev.currentSamples)
// If this could be an instant query, shortcut so as not to change sort order. // If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp { if ev.endTimestamp == ev.startTimestamp {
@ -1253,17 +1257,7 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
} else { } else {
ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts}
} }
if sample.H == nil { addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps)
if ss.Floats == nil {
ss.Floats = getFPointSlice(numSteps)
}
ss.Floats = append(ss.Floats, FPoint{T: ts, F: sample.F})
} else {
if ss.Histograms == nil {
ss.Histograms = getHPointSlice(numSteps)
}
ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: sample.H})
}
seriess[h] = ss seriess[h] = ss
} }
} }
@ -1285,6 +1279,116 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
return mat, warnings return mat, warnings
} }
func (ev *evaluator) rangeEvalAgg(aggExpr *parser.AggregateExpr, sortedGrouping []string, inputMatrix Matrix, param float64) (Matrix, annotations.Annotations) {
// Keep a copy of the original point slice so that it can be returned to the pool.
origMatrix := slices.Clone(inputMatrix)
defer func() {
for _, s := range origMatrix {
putFPointSlice(s.Floats)
putHPointSlice(s.Histograms)
}
}()
var warnings annotations.Annotations
enh := &EvalNodeHelper{}
tempNumSamples := ev.currentSamples
// Create a mapping from input series to output groups.
buf := make([]byte, 0, 1024)
groupToResultIndex := make(map[uint64]int)
seriesToResult := make([]int, len(inputMatrix))
var result Matrix
groupCount := 0
for si, series := range inputMatrix {
var groupingKey uint64
groupingKey, buf = generateGroupingKey(series.Metric, sortedGrouping, aggExpr.Without, buf)
index, ok := groupToResultIndex[groupingKey]
// Add a new group if it doesn't exist.
if !ok {
if aggExpr.Op != parser.TOPK && aggExpr.Op != parser.BOTTOMK {
m := generateGroupingLabels(enh, series.Metric, aggExpr.Without, sortedGrouping)
result = append(result, Series{Metric: m})
}
index = groupCount
groupToResultIndex[groupingKey] = index
groupCount++
}
seriesToResult[si] = index
}
groups := make([]groupedAggregation, groupCount)
var k int
var seriess map[uint64]Series
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK:
if !convertibleToInt64(param) {
ev.errorf("Scalar value %v overflows int64", param)
}
k = int(param)
if k > len(inputMatrix) {
k = len(inputMatrix)
}
if k < 1 {
return nil, warnings
}
seriess = make(map[uint64]Series, len(inputMatrix)) // Output series by series hash.
case parser.QUANTILE:
if math.IsNaN(param) || param < 0 || param > 1 {
warnings.Add(annotations.NewInvalidQuantileWarning(param, aggExpr.Param.PositionRange()))
}
}
for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval {
if err := contextDone(ev.ctx, "expression evaluation"); err != nil {
ev.error(err)
}
// Reset number of samples in memory after each timestamp.
ev.currentSamples = tempNumSamples
// Make the function call.
enh.Ts = ts
var ws annotations.Annotations
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK:
result, ws = ev.aggregationK(aggExpr, k, inputMatrix, seriesToResult, groups, enh, seriess)
// If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp {
return result, ws
}
default:
ws = ev.aggregation(aggExpr, param, inputMatrix, result, seriesToResult, groups, enh)
}
warnings.Merge(ws)
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
}
// Assemble the output matrix. By the time we get here we know we don't have too many samples.
switch aggExpr.Op {
case parser.TOPK, parser.BOTTOMK:
result = make(Matrix, 0, len(seriess))
for _, ss := range seriess {
result = append(result, ss)
}
default:
// Remove empty result rows.
dst := 0
for _, series := range result {
if len(series.Floats) > 0 || len(series.Histograms) > 0 {
result[dst] = series
dst++
}
}
result = result[:dst]
}
return result, warnings
}
// evalSubquery evaluates given SubqueryExpr and returns an equivalent // evalSubquery evaluates given SubqueryExpr and returns an equivalent
// evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set.
func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) {
@ -1337,28 +1441,44 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
sortedGrouping := e.Grouping sortedGrouping := e.Grouping
slices.Sort(sortedGrouping) slices.Sort(sortedGrouping)
// Prepare a function to initialise series helpers with the grouping key.
buf := make([]byte, 0, 1024)
initSeries := func(series labels.Labels, h *EvalSeriesHelper) {
h.groupingKey, buf = generateGroupingKey(series, sortedGrouping, e.Without, buf)
}
unwrapParenExpr(&e.Param) unwrapParenExpr(&e.Param)
param := unwrapStepInvariantExpr(e.Param) param := unwrapStepInvariantExpr(e.Param)
unwrapParenExpr(&param) unwrapParenExpr(&param)
if s, ok := param.(*parser.StringLiteral); ok {
return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { if e.Op == parser.COUNT_VALUES {
return ev.aggregation(e, sortedGrouping, s.Val, v[0].(Vector), sh[0], enh) valueLabel := param.(*parser.StringLiteral)
if !model.LabelName(valueLabel.Val).IsValid() {
ev.errorf("invalid label name %q", valueLabel)
}
if !e.Without {
sortedGrouping = append(sortedGrouping, valueLabel.Val)
slices.Sort(sortedGrouping)
}
return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
return ev.aggregationCountValues(e, sortedGrouping, valueLabel.Val, v[0].(Vector), enh)
}, e.Expr) }, e.Expr)
} }
return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { var warnings annotations.Annotations
var param float64 originalNumSamples := ev.currentSamples
if e.Param != nil { // param is the number k for topk/bottomk, or q for quantile.
param = v[0].(Vector)[0].F var fParam float64
} if param != nil {
return ev.aggregation(e, sortedGrouping, param, v[1].(Vector), sh[1], enh) val, ws := ev.eval(param)
}, e.Param, e.Expr) warnings.Merge(ws)
fParam = val.(Matrix)[0].Floats[0].F
}
// Now fetch the data to be aggregated.
val, ws := ev.eval(e.Expr)
warnings.Merge(ws)
inputMatrix := val.(Matrix)
result, ws := ev.rangeEvalAgg(e, sortedGrouping, inputMatrix, fParam)
warnings.Merge(ws)
ev.currentSamples = originalNumSamples + result.TotalSamples()
ev.samplesStats.UpdatePeak(ev.currentSamples)
return result, warnings
case *parser.Call: case *parser.Call:
call := FunctionCalls[e.Func.Name] call := FunctionCalls[e.Func.Name]
@ -1540,13 +1660,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
histSamples := totalHPointSize(ss.Histograms) histSamples := totalHPointSize(ss.Histograms)
if len(ss.Floats)+histSamples > 0 { if len(ss.Floats)+histSamples > 0 {
if ev.currentSamples+len(ss.Floats)+histSamples <= ev.maxSamples { if ev.currentSamples+len(ss.Floats)+histSamples > ev.maxSamples {
mat = append(mat, ss)
prevSS = &mat[len(mat)-1]
ev.currentSamples += len(ss.Floats) + histSamples
} else {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
mat = append(mat, ss)
prevSS = &mat[len(mat)-1]
ev.currentSamples += len(ss.Floats) + histSamples
} }
ev.samplesStats.UpdatePeak(ev.currentSamples) ev.samplesStats.UpdatePeak(ev.currentSamples)
@ -1709,26 +1828,28 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
step++ step++
_, f, h, ok := ev.vectorSelectorSingle(it, e, ts) _, f, h, ok := ev.vectorSelectorSingle(it, e, ts)
if ok { if ok {
if ev.currentSamples < ev.maxSamples { if h == nil {
if h == nil { ev.currentSamples++
if ss.Floats == nil { ev.samplesStats.IncrementSamplesAtStep(step, 1)
ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) if ev.currentSamples > ev.maxSamples {
} ev.error(ErrTooManySamples(env))
ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
ev.currentSamples++
ev.samplesStats.IncrementSamplesAtStep(step, 1)
} else {
if ss.Histograms == nil {
ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps)
}
point := HPoint{H: h, T: ts}
ss.Histograms = append(ss.Histograms, point)
histSize := point.size()
ev.currentSamples += histSize
ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize))
} }
if ss.Floats == nil {
ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps)
}
ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
} else { } else {
ev.error(ErrTooManySamples(env)) point := HPoint{H: h, T: ts}
histSize := point.size()
ev.currentSamples += histSize
ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize))
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
if ss.Histograms == nil {
ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps)
}
ss.Histograms = append(ss.Histograms, point)
} }
} }
} }
@ -1856,7 +1977,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
panic(fmt.Errorf("unhandled expression of type: %T", expr)) panic(fmt.Errorf("unhandled expression of type: %T", expr))
} }
// reuseOrGetFPointSlices reuses the space from previous slice to create new slice if the former has lots of room. // reuseOrGetHPointSlices reuses the space from previous slice to create new slice if the former has lots of room.
// The previous slices capacity is adjusted so when it is re-used from the pool it doesn't overflow into the new one. // The previous slices capacity is adjusted so when it is re-used from the pool it doesn't overflow into the new one.
func reuseOrGetHPointSlices(prevSS *Series, numSteps int) (r []HPoint) { func reuseOrGetHPointSlices(prevSS *Series, numSteps int) (r []HPoint) {
if prevSS != nil && cap(prevSS.Histograms)-2*len(prevSS.Histograms) > 0 { if prevSS != nil && cap(prevSS.Histograms)-2*len(prevSS.Histograms) > 0 {
@ -2168,10 +2289,10 @@ loop:
histograms = histograms[:n] histograms = histograms[:n]
continue loop continue loop
} }
if ev.currentSamples >= ev.maxSamples { ev.currentSamples += histograms[n].size()
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.currentSamples += histograms[n].size()
} }
case chunkenc.ValFloat: case chunkenc.ValFloat:
t, f := buf.At() t, f := buf.At()
@ -2180,10 +2301,10 @@ loop:
} }
// Values in the buffer are guaranteed to be smaller than maxt. // Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintFloats { if t >= mintFloats {
if ev.currentSamples >= ev.maxSamples { ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.currentSamples++
if floats == nil { if floats == nil {
floats = getFPointSlice(16) floats = getFPointSlice(16)
} }
@ -2211,22 +2332,22 @@ loop:
histograms = histograms[:n] histograms = histograms[:n]
break break
} }
if ev.currentSamples >= ev.maxSamples { ev.currentSamples += histograms[n].size()
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.currentSamples += histograms[n].size()
case chunkenc.ValFloat: case chunkenc.ValFloat:
t, f := it.At() t, f := it.At()
if t == maxt && !value.IsStaleNaN(f) { if t == maxt && !value.IsStaleNaN(f) {
if ev.currentSamples >= ev.maxSamples { ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
if floats == nil { if floats == nil {
floats = getFPointSlice(16) floats = getFPointSlice(16)
} }
floats = append(floats, FPoint{T: t, F: f}) floats = append(floats, FPoint{T: t, F: f})
ev.currentSamples++
} }
} }
ev.samplesStats.UpdatePeak(ev.currentSamples) ev.samplesStats.UpdatePeak(ev.currentSamples)
@ -2607,171 +2728,85 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram
} }
type groupedAggregation struct { type groupedAggregation struct {
seen bool // Was this output group seen in the input at this timestamp.
hasFloat bool // Has at least 1 float64 sample aggregated. hasFloat bool // Has at least 1 float64 sample aggregated.
hasHistogram bool // Has at least 1 histogram sample aggregated. hasHistogram bool // Has at least 1 histogram sample aggregated.
labels labels.Labels
floatValue float64 floatValue float64
histogramValue *histogram.FloatHistogram histogramValue *histogram.FloatHistogram
floatMean float64 floatMean float64
histogramMean *histogram.FloatHistogram
groupCount int groupCount int
heap vectorByValueHeap heap vectorByValueHeap
reverseHeap vectorByReverseValueHeap
} }
// aggregation evaluates an aggregation operation on a Vector. The provided grouping labels // aggregation evaluates sum, avg, count, stdvar, stddev or quantile at one timestep on inputMatrix.
// must be sorted. // These functions produce one output series for each group specified in the expression, with just the labels from `by(...)`.
func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { // outputMatrix should be already populated with grouping labels; groups is one-to-one with outputMatrix.
// seriesToResult maps inputMatrix indexes to outputMatrix indexes.
func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix, outputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper) annotations.Annotations {
op := e.Op op := e.Op
without := e.Without
var annos annotations.Annotations var annos annotations.Annotations
result := map[uint64]*groupedAggregation{} for i := range groups {
orderedResult := []*groupedAggregation{} groups[i].seen = false
var k int64
if op == parser.TOPK || op == parser.BOTTOMK {
f := param.(float64)
if !convertibleToInt64(f) {
ev.errorf("Scalar value %v overflows int64", f)
}
k = int64(f)
if k < 1 {
return Vector{}, annos
}
}
var q float64
if op == parser.QUANTILE {
q = param.(float64)
}
var valueLabel string
var recomputeGroupingKey bool
if op == parser.COUNT_VALUES {
valueLabel = param.(string)
if !model.LabelName(valueLabel).IsValid() {
ev.errorf("invalid label name %q", valueLabel)
}
if !without {
// We're changing the grouping labels so we have to ensure they're still sorted
// and we have to flag to recompute the grouping key. Considering the count_values()
// operator is less frequently used than other aggregations, we're fine having to
// re-compute the grouping key on each step for this case.
grouping = append(grouping, valueLabel)
slices.Sort(grouping)
recomputeGroupingKey = true
}
} }
var buf []byte for si := range inputMatrix {
for si, s := range vec { f, h, ok := ev.nextValues(enh.Ts, &inputMatrix[si])
metric := s.Metric
if op == parser.COUNT_VALUES {
enh.resetBuilder(metric)
enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64))
metric = enh.lb.Labels()
// We've changed the metric so we have to recompute the grouping key.
recomputeGroupingKey = true
}
// We can use the pre-computed grouping key unless grouping labels have changed.
var groupingKey uint64
if !recomputeGroupingKey {
groupingKey = seriesHelper[si].groupingKey
} else {
groupingKey, buf = generateGroupingKey(metric, grouping, without, buf)
}
group, ok := result[groupingKey]
// Add a new group if it doesn't exist.
if !ok { if !ok {
var m labels.Labels continue
enh.resetBuilder(metric) }
switch {
case without: group := &groups[seriesToResult[si]]
enh.lb.Del(grouping...) // Initialize this group if it's the first time we've seen it.
enh.lb.Del(labels.MetricName) if !group.seen {
m = enh.lb.Labels() *group = groupedAggregation{
case len(grouping) > 0: seen: true,
enh.lb.Keep(grouping...) floatValue: f,
m = enh.lb.Labels() floatMean: f,
default:
m = labels.EmptyLabels()
}
newAgg := &groupedAggregation{
labels: m,
floatValue: s.F,
floatMean: s.F,
groupCount: 1, groupCount: 1,
} }
switch {
case s.H == nil:
newAgg.hasFloat = true
case op == parser.SUM:
newAgg.histogramValue = s.H.Copy()
newAgg.hasHistogram = true
case op == parser.AVG:
newAgg.histogramMean = s.H.Copy()
newAgg.hasHistogram = true
case op == parser.STDVAR || op == parser.STDDEV:
newAgg.groupCount = 0
}
result[groupingKey] = newAgg
orderedResult = append(orderedResult, newAgg)
inputVecLen := int64(len(vec))
resultSize := k
switch {
case k > inputVecLen:
resultSize = inputVecLen
case k == 0:
resultSize = 1
}
switch op { switch op {
case parser.SUM, parser.AVG:
if h == nil {
group.hasFloat = true
} else {
group.histogramValue = h.Copy()
group.hasHistogram = true
}
case parser.STDVAR, parser.STDDEV: case parser.STDVAR, parser.STDDEV:
result[groupingKey].floatValue = 0 group.floatValue = 0
case parser.TOPK, parser.QUANTILE: case parser.QUANTILE:
result[groupingKey].heap = make(vectorByValueHeap, 1, resultSize) group.heap = make(vectorByValueHeap, 1)
result[groupingKey].heap[0] = Sample{ group.heap[0] = Sample{F: f}
F: s.F,
Metric: s.Metric,
}
case parser.BOTTOMK:
result[groupingKey].reverseHeap = make(vectorByReverseValueHeap, 1, resultSize)
result[groupingKey].reverseHeap[0] = Sample{
F: s.F,
Metric: s.Metric,
}
case parser.GROUP: case parser.GROUP:
result[groupingKey].floatValue = 1 group.floatValue = 1
} }
continue continue
} }
switch op { switch op {
case parser.SUM: case parser.SUM:
if s.H != nil { if h != nil {
group.hasHistogram = true group.hasHistogram = true
if group.histogramValue != nil { if group.histogramValue != nil {
group.histogramValue.Add(s.H) group.histogramValue.Add(h)
} }
// Otherwise the aggregation contained floats // Otherwise the aggregation contained floats
// previously and will be invalid anyway. No // previously and will be invalid anyway. No
// point in copying the histogram in that case. // point in copying the histogram in that case.
} else { } else {
group.hasFloat = true group.hasFloat = true
group.floatValue += s.F group.floatValue += f
} }
case parser.AVG: case parser.AVG:
group.groupCount++ group.groupCount++
if s.H != nil { if h != nil {
group.hasHistogram = true group.hasHistogram = true
if group.histogramMean != nil { if group.histogramValue != nil {
left := s.H.Copy().Div(float64(group.groupCount)) left := h.Copy().Div(float64(group.groupCount))
right := group.histogramMean.Copy().Div(float64(group.groupCount)) right := group.histogramValue.Copy().Div(float64(group.groupCount))
toAdd := left.Sub(right) toAdd := left.Sub(right)
group.histogramMean.Add(toAdd) group.histogramValue.Add(toAdd)
} }
// Otherwise the aggregation contained floats // Otherwise the aggregation contained floats
// previously and will be invalid anyway. No // previously and will be invalid anyway. No
@ -2779,13 +2814,13 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
} else { } else {
group.hasFloat = true group.hasFloat = true
if math.IsInf(group.floatMean, 0) { if math.IsInf(group.floatMean, 0) {
if math.IsInf(s.F, 0) && (group.floatMean > 0) == (s.F > 0) { if math.IsInf(f, 0) && (group.floatMean > 0) == (f > 0) {
// The `floatMean` and `s.F` values are `Inf` of the same sign. They // The `floatMean` and `s.F` values are `Inf` of the same sign. They
// can't be subtracted, but the value of `floatMean` is correct // can't be subtracted, but the value of `floatMean` is correct
// already. // already.
break break
} }
if !math.IsInf(s.F, 0) && !math.IsNaN(s.F) { if !math.IsInf(f, 0) && !math.IsNaN(f) {
// At this stage, the mean is an infinite. If the added // At this stage, the mean is an infinite. If the added
// value is neither an Inf or a Nan, we can keep that mean // value is neither an Inf or a Nan, we can keep that mean
// value. // value.
@ -2796,81 +2831,48 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
} }
} }
// Divide each side of the `-` by `group.groupCount` to avoid float64 overflows. // Divide each side of the `-` by `group.groupCount` to avoid float64 overflows.
group.floatMean += s.F/float64(group.groupCount) - group.floatMean/float64(group.groupCount) group.floatMean += f/float64(group.groupCount) - group.floatMean/float64(group.groupCount)
} }
case parser.GROUP: case parser.GROUP:
// Do nothing. Required to avoid the panic in `default:` below. // Do nothing. Required to avoid the panic in `default:` below.
case parser.MAX: case parser.MAX:
if group.floatValue < s.F || math.IsNaN(group.floatValue) { if group.floatValue < f || math.IsNaN(group.floatValue) {
group.floatValue = s.F group.floatValue = f
} }
case parser.MIN: case parser.MIN:
if group.floatValue > s.F || math.IsNaN(group.floatValue) { if group.floatValue > f || math.IsNaN(group.floatValue) {
group.floatValue = s.F group.floatValue = f
} }
case parser.COUNT, parser.COUNT_VALUES: case parser.COUNT:
group.groupCount++ group.groupCount++
case parser.STDVAR, parser.STDDEV: case parser.STDVAR, parser.STDDEV:
if s.H == nil { // Ignore native histograms. if h == nil { // Ignore native histograms.
group.groupCount++ group.groupCount++
delta := s.F - group.floatMean delta := f - group.floatMean
group.floatMean += delta / float64(group.groupCount) group.floatMean += delta / float64(group.groupCount)
group.floatValue += delta * (s.F - group.floatMean) group.floatValue += delta * (f - group.floatMean)
}
case parser.TOPK:
// We build a heap of up to k elements, with the smallest element at heap[0].
switch {
case int64(len(group.heap)) < k:
heap.Push(&group.heap, &Sample{
F: s.F,
Metric: s.Metric,
})
case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
// This new element is bigger than the previous smallest element - overwrite that.
group.heap[0] = Sample{
F: s.F,
Metric: s.Metric,
}
if k > 1 {
heap.Fix(&group.heap, 0) // Maintain the heap invariant.
}
}
case parser.BOTTOMK:
// We build a heap of up to k elements, with the biggest element at heap[0].
switch {
case int64(len(group.reverseHeap)) < k:
heap.Push(&group.reverseHeap, &Sample{
F: s.F,
Metric: s.Metric,
})
case group.reverseHeap[0].F > s.F || (math.IsNaN(group.reverseHeap[0].F) && !math.IsNaN(s.F)):
// This new element is smaller than the previous biggest element - overwrite that.
group.reverseHeap[0] = Sample{
F: s.F,
Metric: s.Metric,
}
if k > 1 {
heap.Fix(&group.reverseHeap, 0) // Maintain the heap invariant.
}
} }
case parser.QUANTILE: case parser.QUANTILE:
group.heap = append(group.heap, s) group.heap = append(group.heap, Sample{F: f})
default: default:
panic(fmt.Errorf("expected aggregation operator but got %q", op)) panic(fmt.Errorf("expected aggregation operator but got %q", op))
} }
} }
// Construct the result Vector from the aggregated groups. // Construct the output matrix from the aggregated groups.
for _, aggr := range orderedResult { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
for ri, aggr := range groups {
if !aggr.seen {
continue
}
switch op { switch op {
case parser.AVG: case parser.AVG:
if aggr.hasFloat && aggr.hasHistogram { if aggr.hasFloat && aggr.hasHistogram {
@ -2879,12 +2881,12 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
continue continue
} }
if aggr.hasHistogram { if aggr.hasHistogram {
aggr.histogramValue = aggr.histogramMean.Compact(0) aggr.histogramValue = aggr.histogramValue.Compact(0)
} else { } else {
aggr.floatValue = aggr.floatMean aggr.floatValue = aggr.floatMean
} }
case parser.COUNT, parser.COUNT_VALUES: case parser.COUNT:
aggr.floatValue = float64(aggr.groupCount) aggr.floatValue = float64(aggr.groupCount)
case parser.STDVAR: case parser.STDVAR:
@ -2893,36 +2895,7 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
case parser.STDDEV: case parser.STDDEV:
aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount)) aggr.floatValue = math.Sqrt(aggr.floatValue / float64(aggr.groupCount))
case parser.TOPK:
// The heap keeps the lowest value on top, so reverse it.
if len(aggr.heap) > 1 {
sort.Sort(sort.Reverse(aggr.heap))
}
for _, v := range aggr.heap {
enh.Out = append(enh.Out, Sample{
Metric: v.Metric,
F: v.F,
})
}
continue // Bypass default append.
case parser.BOTTOMK:
// The heap keeps the highest value on top, so reverse it.
if len(aggr.reverseHeap) > 1 {
sort.Sort(sort.Reverse(aggr.reverseHeap))
}
for _, v := range aggr.reverseHeap {
enh.Out = append(enh.Out, Sample{
Metric: v.Metric,
F: v.F,
})
}
continue // Bypass default append.
case parser.QUANTILE: case parser.QUANTILE:
if math.IsNaN(q) || q < 0 || q > 1 {
annos.Add(annotations.NewInvalidQuantileWarning(q, e.Param.PositionRange()))
}
aggr.floatValue = quantile(q, aggr.heap) aggr.floatValue = quantile(q, aggr.heap)
case parser.SUM: case parser.SUM:
@ -2938,13 +2911,196 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, grouping []string, par
// For other aggregations, we already have the right value. // For other aggregations, we already have the right value.
} }
ss := &outputMatrix[ri]
addToSeries(ss, enh.Ts, aggr.floatValue, aggr.histogramValue, numSteps)
}
return annos
}
// aggregationK evaluates topk or bottomk at one timestep on inputMatrix.
// The output has the same labels as the input, but only k series per group.
// seriesToResult maps inputMatrix indexes to groups indexes.
// For an instant query, returns a Matrix in descending order for topk or ascending for bottomk.
// For a range query, aggregates output in the seriess map.
func (ev *evaluator) aggregationK(e *parser.AggregateExpr, k int, inputMatrix Matrix, seriesToResult []int, groups []groupedAggregation, enh *EvalNodeHelper, seriess map[uint64]Series) (Matrix, annotations.Annotations) {
op := e.Op
var s Sample
var annos annotations.Annotations
for i := range groups {
groups[i].seen = false
}
for si := range inputMatrix {
f, _, ok := ev.nextValues(enh.Ts, &inputMatrix[si])
if !ok {
continue
}
s = Sample{Metric: inputMatrix[si].Metric, F: f}
group := &groups[seriesToResult[si]]
// Initialize this group if it's the first time we've seen it.
if !group.seen {
*group = groupedAggregation{
seen: true,
heap: make(vectorByValueHeap, 1, k),
}
group.heap[0] = s
continue
}
switch op {
case parser.TOPK:
// We build a heap of up to k elements, with the smallest element at heap[0].
switch {
case len(group.heap) < k:
heap.Push(&group.heap, &s)
case group.heap[0].F < s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
// This new element is bigger than the previous smallest element - overwrite that.
group.heap[0] = s
if k > 1 {
heap.Fix(&group.heap, 0) // Maintain the heap invariant.
}
}
case parser.BOTTOMK:
// We build a heap of up to k elements, with the biggest element at heap[0].
switch {
case len(group.heap) < k:
heap.Push((*vectorByReverseValueHeap)(&group.heap), &s)
case group.heap[0].F > s.F || (math.IsNaN(group.heap[0].F) && !math.IsNaN(s.F)):
// This new element is smaller than the previous biggest element - overwrite that.
group.heap[0] = s
if k > 1 {
heap.Fix((*vectorByReverseValueHeap)(&group.heap), 0) // Maintain the heap invariant.
}
}
default:
panic(fmt.Errorf("expected aggregation operator but got %q", op))
}
}
// Construct the result from the aggregated groups.
numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1
var mat Matrix
if ev.endTimestamp == ev.startTimestamp {
mat = make(Matrix, 0, len(groups))
}
add := func(lbls labels.Labels, f float64) {
// If this could be an instant query, add directly to the matrix so the result is in consistent order.
if ev.endTimestamp == ev.startTimestamp {
mat = append(mat, Series{Metric: lbls, Floats: []FPoint{{T: enh.Ts, F: f}}})
} else {
// Otherwise the results are added into seriess elements.
hash := lbls.Hash()
ss, ok := seriess[hash]
if !ok {
ss = Series{Metric: lbls}
}
addToSeries(&ss, enh.Ts, f, nil, numSteps)
seriess[hash] = ss
}
}
for _, aggr := range groups {
if !aggr.seen {
continue
}
switch op {
case parser.TOPK:
// The heap keeps the lowest value on top, so reverse it.
if len(aggr.heap) > 1 {
sort.Sort(sort.Reverse(aggr.heap))
}
for _, v := range aggr.heap {
add(v.Metric, v.F)
}
case parser.BOTTOMK:
// The heap keeps the highest value on top, so reverse it.
if len(aggr.heap) > 1 {
sort.Sort(sort.Reverse((*vectorByReverseValueHeap)(&aggr.heap)))
}
for _, v := range aggr.heap {
add(v.Metric, v.F)
}
}
}
return mat, annos
}
// aggregationCountValues evaluates count_values on vec.
// Outputs as many series per group as there are values in the input.
func (ev *evaluator) aggregationCountValues(e *parser.AggregateExpr, grouping []string, valueLabel string, vec Vector, enh *EvalNodeHelper) (Vector, annotations.Annotations) {
type groupCount struct {
labels labels.Labels
count int
}
result := map[uint64]*groupCount{}
var buf []byte
for _, s := range vec {
enh.resetBuilder(s.Metric)
enh.lb.Set(valueLabel, strconv.FormatFloat(s.F, 'f', -1, 64))
metric := enh.lb.Labels()
// Since the count_values() operator is used less frequently than other
// aggregations, we're fine with re-computing the grouping key on each step
// for this case.
var groupingKey uint64
groupingKey, buf = generateGroupingKey(metric, grouping, e.Without, buf)
group, ok := result[groupingKey]
// Add a new group if it doesn't exist.
if !ok {
result[groupingKey] = &groupCount{
labels: generateGroupingLabels(enh, metric, e.Without, grouping),
count: 1,
}
continue
}
group.count++
}
// Construct the result Vector from the aggregated groups.
for _, aggr := range result {
enh.Out = append(enh.Out, Sample{ enh.Out = append(enh.Out, Sample{
Metric: aggr.labels, Metric: aggr.labels,
F: aggr.floatValue, F: float64(aggr.count),
H: aggr.histogramValue,
}) })
} }
return enh.Out, annos return enh.Out, nil
}
func addToSeries(ss *Series, ts int64, f float64, h *histogram.FloatHistogram, numSteps int) {
if h == nil {
if ss.Floats == nil {
ss.Floats = getFPointSlice(numSteps)
}
ss.Floats = append(ss.Floats, FPoint{T: ts, F: f})
return
}
if ss.Histograms == nil {
ss.Histograms = getHPointSlice(numSteps)
}
ss.Histograms = append(ss.Histograms, HPoint{T: ts, H: h})
}
func (ev *evaluator) nextValues(ts int64, series *Series) (f float64, h *histogram.FloatHistogram, b bool) {
switch {
case len(series.Floats) > 0 && series.Floats[0].T == ts:
f = series.Floats[0].F
series.Floats = series.Floats[1:] // Move input vectors forward
case len(series.Histograms) > 0 && series.Histograms[0].T == ts:
h = series.Histograms[0].H
series.Histograms = series.Histograms[1:]
default:
return f, h, false
}
return f, h, true
} }
// groupingKey builds and returns the grouping key for the given metric and // groupingKey builds and returns the grouping key for the given metric and
@ -2962,6 +3118,21 @@ func generateGroupingKey(metric labels.Labels, grouping []string, without bool,
return metric.HashForLabels(buf, grouping...) return metric.HashForLabels(buf, grouping...)
} }
func generateGroupingLabels(enh *EvalNodeHelper, metric labels.Labels, without bool, grouping []string) labels.Labels {
enh.resetBuilder(metric)
switch {
case without:
enh.lb.Del(grouping...)
enh.lb.Del(labels.MetricName)
return enh.lb.Labels()
case len(grouping) > 0:
enh.lb.Keep(grouping...)
return enh.lb.Labels()
default:
return labels.EmptyLabels()
}
}
// btos returns 1 if b is true, 0 otherwise. // btos returns 1 if b is true, 0 otherwise.
func btos(b bool) float64 { func btos(b bool) float64 {
if b { if b {

View file

@ -755,6 +755,7 @@ load 10s
metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100 metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100
metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100 metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100
metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100 metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100
metricWith1HistogramEvery10Seconds {{schema:1 count:5 sum:20 buckets:[1 2 1 1]}}+{{schema:1 count:10 sum:5 buckets:[1 2 3 4]}}x100
`) `)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
@ -795,6 +796,15 @@ load 10s
21000: 1, 21000: 1,
}, },
}, },
{
Query: "metricWith1HistogramEvery10Seconds",
Start: time.Unix(21, 0),
PeakSamples: 12,
TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
21000: 12,
},
},
{ {
// timestamp function has a special handling. // timestamp function has a special handling.
Query: "timestamp(metricWith1SampleEvery10Seconds)", Query: "timestamp(metricWith1SampleEvery10Seconds)",
@ -805,6 +815,15 @@ load 10s
21000: 1, 21000: 1,
}, },
}, },
{
Query: "timestamp(metricWith1HistogramEvery10Seconds)",
Start: time.Unix(21, 0),
PeakSamples: 13, // histogram size 12 + 1 extra because of timestamp
TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
21000: 1,
},
},
{ {
Query: "metricWith1SampleEvery10Seconds", Query: "metricWith1SampleEvery10Seconds",
Start: time.Unix(22, 0), Start: time.Unix(22, 0),
@ -877,11 +896,20 @@ load 10s
201000: 6, 201000: 6,
}, },
}, },
{
Query: "metricWith1HistogramEvery10Seconds[60s]",
Start: time.Unix(201, 0),
PeakSamples: 72,
TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 72,
},
},
{ {
Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 10, PeakSamples: 10,
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 60/5 (using 59s so we always return 6 samples TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples
// as if we run a query on 00 looking back 60 seconds we will return 7 samples; // as if we run a query on 00 looking back 60 seconds we will return 7 samples;
// see next test). // see next test).
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
@ -892,12 +920,22 @@ load 10s
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 11, PeakSamples: 11,
TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) + 2 as TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 26, 201000: 26,
}, },
}, },
{
Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 72,
TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 312,
},
},
{ {
Query: "metricWith1SampleEvery10Seconds[60s] @ 30", Query: "metricWith1SampleEvery10Seconds[60s] @ 30",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
@ -907,6 +945,15 @@ load 10s
201000: 4, 201000: 4,
}, },
}, },
{
Query: "metricWith1HistogramEvery10Seconds[60s] @ 30",
Start: time.Unix(201, 0),
PeakSamples: 48,
TotalSamples: 48, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 48,
},
},
{ {
Query: "sum(max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))", Query: "sum(max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
@ -919,7 +966,7 @@ load 10s
{ {
Query: "sum by (b) (max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))", Query: "sum by (b) (max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 8, PeakSamples: 7,
TotalSamples: 12, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 3 series TotalSamples: 12, // @ modifier force the evaluation to at 30 seconds - So it brings 4 datapoints (0, 10, 20, 30 seconds) * 3 series
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 12, 201000: 12,
@ -1035,13 +1082,42 @@ load 10s
}, },
}, },
{ {
// timestamp function as a special handling Query: `metricWith1HistogramEvery10Seconds`,
Start: time.Unix(204, 0),
End: time.Unix(223, 0),
Interval: 5 * time.Second,
PeakSamples: 48,
TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{
204000: 12, // aligned to the step time, not the sample time
209000: 12,
214000: 12,
219000: 12,
},
},
{
// timestamp function has a special handling
Query: "timestamp(metricWith1SampleEvery10Seconds)", Query: "timestamp(metricWith1SampleEvery10Seconds)",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
End: time.Unix(220, 0), End: time.Unix(220, 0),
Interval: 5 * time.Second, Interval: 5 * time.Second,
PeakSamples: 5, PeakSamples: 5,
TotalSamples: 4, // (1 sample / 10 seconds) * 4 steps TotalSamples: 4, // 1 sample per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 1,
206000: 1,
211000: 1,
216000: 1,
},
},
{
// timestamp function has a special handling
Query: "timestamp(metricWith1HistogramEvery10Seconds)",
Start: time.Unix(201, 0),
End: time.Unix(220, 0),
Interval: 5 * time.Second,
PeakSamples: 16,
TotalSamples: 4, // 1 sample per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 1, 201000: 1,
206000: 1, 206000: 1,
@ -3438,7 +3514,39 @@ func TestNativeHistogram_HistogramStdDevVar(t *testing.T) {
}, },
NegativeBuckets: []int64{1, 0}, NegativeBuckets: []int64{1, 0},
}, },
stdVar: 1544.8582535368798, // actual variance: 1738.4082 stdVar: 1844.4651144196398, // actual variance: 1738.4082
},
{
name: "-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3",
h: &histogram.Histogram{
Count: 10,
ZeroCount: 0,
Sum: -112946,
Schema: 0,
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 3},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 1},
{Offset: 2, Length: 1},
},
NegativeBuckets: []int64{1, 0, 0, 0, 0, 2, -2, 0},
},
stdVar: 759352122.1939945, // actual variance: 882690990
},
{
name: "-10 x10",
h: &histogram.Histogram{
Count: 10,
ZeroCount: 0,
Sum: -100,
Schema: 0,
NegativeSpans: []histogram.Span{
{Offset: 4, Length: 1},
},
NegativeBuckets: []int64{10},
},
stdVar: 1.725830020304794, // actual variance: 0
}, },
{ {
name: "-50, -8, 0, 3, 8, 9, 100, NaN", name: "-50, -8, 0, 3, 8, 9, 100, NaN",

View file

@ -1111,11 +1111,17 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
it := sample.H.AllBucketIterator() it := sample.H.AllBucketIterator()
for it.Next() { for it.Next() {
bucket := it.At() bucket := it.At()
if bucket.Count == 0 {
continue
}
var val float64 var val float64
if bucket.Lower <= 0 && 0 <= bucket.Upper { if bucket.Lower <= 0 && 0 <= bucket.Upper {
val = 0 val = 0
} else { } else {
val = math.Sqrt(bucket.Upper * bucket.Lower) val = math.Sqrt(bucket.Upper * bucket.Lower)
if bucket.Upper < 0 {
val = -val
}
} }
delta := val - mean delta := val - mean
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
@ -1144,11 +1150,17 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
it := sample.H.AllBucketIterator() it := sample.H.AllBucketIterator()
for it.Next() { for it.Next() {
bucket := it.At() bucket := it.At()
if bucket.Count == 0 {
continue
}
var val float64 var val float64
if bucket.Lower <= 0 && 0 <= bucket.Upper { if bucket.Lower <= 0 && 0 <= bucket.Upper {
val = 0 val = 0
} else { } else {
val = math.Sqrt(bucket.Upper * bucket.Lower) val = math.Sqrt(bucket.Upper * bucket.Lower)
if bucket.Upper < 0 {
val = -val
}
} }
delta := val - mean delta := val - mean
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)

View file

@ -3696,9 +3696,17 @@ func makeInt64Pointer(val int64) *int64 {
return valp return valp
} }
func readable(s string) string {
const maxReadableStringLen = 40
if len(s) < maxReadableStringLen {
return s
}
return s[:maxReadableStringLen] + "..."
}
func TestParseExpressions(t *testing.T) { func TestParseExpressions(t *testing.T) {
for _, test := range testExpr { for _, test := range testExpr {
t.Run(test.input, func(t *testing.T) { t.Run(readable(test.input), func(t *testing.T) {
expr, err := ParseExpr(test.input) expr, err := ParseExpr(test.input)
// Unexpected errors are always caused by a bug. // Unexpected errors are always caused by a bug.
@ -3706,7 +3714,31 @@ func TestParseExpressions(t *testing.T) {
if !test.fail { if !test.fail {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, test.expected, expr, "error on input '%s'", test.input) expected := test.expected
// The FastRegexMatcher is not comparable with a deep equal, so only compare its String() version.
if actualVector, ok := expr.(*VectorSelector); ok {
require.IsType(t, &VectorSelector{}, test.expected, "error on input '%s'", test.input)
expectedVector := test.expected.(*VectorSelector)
require.Len(t, actualVector.LabelMatchers, len(expectedVector.LabelMatchers), "error on input '%s'", test.input)
for i := 0; i < len(actualVector.LabelMatchers); i++ {
expectedMatcher := expectedVector.LabelMatchers[i].String()
actualMatcher := actualVector.LabelMatchers[i].String()
require.Equal(t, expectedMatcher, actualMatcher, "unexpected label matcher '%s' on input '%s'", actualMatcher, test.input)
}
// Make a shallow copy of the expected expr (because the test cases are defined in a global variable)
// and then reset the LabelMatchers so they are not compared by the following deep equal.
expectedCopy := *expectedVector
expectedCopy.LabelMatchers = nil
expected = &expectedCopy
actualVector.LabelMatchers = nil
}
require.Equal(t, expected, expr, "error on input '%s'", test.input)
} else { } else {
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())

View file

@ -46,6 +46,7 @@ var (
patSpace = regexp.MustCompile("[\t ]+") patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`) patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
) )
const ( const (
@ -72,7 +73,7 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
} }
// RunBuiltinTests runs an acceptance test suite against the provided engine. // RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine engineQuerier) { func RunBuiltinTests(t *testing.T, engine QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
@ -89,11 +90,19 @@ func RunBuiltinTests(t *testing.T, engine engineQuerier) {
} }
// RunTest parses and runs the test against the provided engine. // RunTest parses and runs the test against the provided engine.
func RunTest(t testutil.T, input string, engine engineQuerier) { func RunTest(t testutil.T, input string, engine QueryEngine) {
test, err := newTest(t, input) require.NoError(t, runTest(t, input, engine))
require.NoError(t, err) }
func runTest(t testutil.T, input string, engine QueryEngine) error {
test, err := newTest(t, input)
// Why do this before checking err? newTest() can create the test storage and then return an error,
// and we want to make sure to clean that up to avoid leaking goroutines.
defer func() { defer func() {
if test == nil {
return
}
if test.storage != nil { if test.storage != nil {
test.storage.Close() test.storage.Close()
} }
@ -102,11 +111,19 @@ func RunTest(t testutil.T, input string, engine engineQuerier) {
} }
}() }()
for _, cmd := range test.cmds { if err != nil {
// TODO(fabxc): aggregate command errors, yield diffs for result return err
// comparison errors.
require.NoError(t, test.exec(cmd, engine))
} }
for _, cmd := range test.cmds {
if err := test.exec(cmd, engine); err != nil {
// TODO(fabxc): aggregate command errors, yield diffs for result
// comparison errors.
return err
}
}
return nil
} }
// test is a sequence of read and write commands that are run // test is a sequence of read and write commands that are run
@ -137,11 +154,6 @@ func newTest(t testutil.T, input string) (*test, error) {
//go:embed testdata //go:embed testdata
var testsFs embed.FS var testsFs embed.FS
type engineQuerier interface {
NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
}
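With the unexported engineQuerier interface removed, the test runner is now expressed in terms of the exported QueryEngine interface (see the updated RunBuiltinTests, RunTest, and EngineQueryFunc signatures in this commit), so alternative engine implementations can drive the same acceptance suite.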
func raise(line int, format string, v ...interface{}) error { func raise(line int, format string, v ...interface{}) error {
return &parser.ParseErr{ return &parser.ParseErr{
LineOffset: line, LineOffset: line,
@ -188,15 +200,26 @@ func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValu
} }
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
if !patEvalInstant.MatchString(lines[i]) { instantParts := patEvalInstant.FindStringSubmatch(lines[i])
return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>") rangeParts := patEvalRange.FindStringSubmatch(lines[i])
if instantParts == nil && rangeParts == nil {
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'")
} }
parts := patEvalInstant.FindStringSubmatch(lines[i])
var ( isInstant := instantParts != nil
mod = parts[1]
at = parts[2] var mod string
expr = parts[3] var expr string
)
if isInstant {
mod = instantParts[1]
expr = instantParts[3]
} else {
mod = rangeParts[1]
expr = rangeParts[5]
}
_, err := parser.ParseExpr(expr) _, err := parser.ParseExpr(expr)
if err != nil { if err != nil {
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) { parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
@ -209,15 +232,54 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
return i, nil, err return i, nil, err
} }
offset, err := model.ParseDuration(at) formatErr := func(format string, args ...any) error {
if err != nil { combinedArgs := []any{expr, i + 1}
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
} combinedArgs = append(combinedArgs, args...)
ts := testStartTime.Add(time.Duration(offset)) return fmt.Errorf("error in eval %s (line %v): "+format, combinedArgs...)
}
var cmd *evalCmd
if isInstant {
at := instantParts[2]
offset, err := model.ParseDuration(at)
if err != nil {
return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
}
ts := testStartTime.Add(time.Duration(offset))
cmd = newInstantEvalCmd(expr, ts, i+1)
} else {
from := rangeParts[2]
to := rangeParts[3]
step := rangeParts[4]
parsedFrom, err := model.ParseDuration(from)
if err != nil {
return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err)
}
parsedTo, err := model.ParseDuration(to)
if err != nil {
return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
}
if parsedTo < parsedFrom {
return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
}
parsedStep, err := model.ParseDuration(step)
if err != nil {
return i, nil, formatErr("invalid step definition %q: %s", step, err)
}
cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
}
cmd := newEvalCmd(expr, ts, i+1)
switch mod { switch mod {
case "ordered": case "ordered":
// Ordered results are not supported for range queries, but the regex for range query commands does not allow
// asserting an ordered result, so we don't need to do any error checking here.
cmd.ordered = true cmd.ordered = true
case "fail": case "fail":
cmd.fail = true cmd.fail = true
@ -240,8 +302,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
} }
// Currently, we are not expecting any matrices. // Currently, we are not expecting any matrices.
if len(vals) > 1 { if len(vals) > 1 && isInstant {
return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed") return i, nil, formatErr("expecting multiple values in instant evaluation not allowed")
} }
cmd.expectMetric(j, metric, vals...) cmd.expectMetric(j, metric, vals...)
} }
@ -375,8 +437,11 @@ func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
type evalCmd struct { type evalCmd struct {
expr string expr string
start time.Time start time.Time
end time.Time
step time.Duration
line int line int
isRange bool // if false, instant query
fail, ordered bool fail, ordered bool
metrics map[uint64]labels.Labels metrics map[uint64]labels.Labels
@ -392,7 +457,7 @@ func (e entry) String() string {
return fmt.Sprintf("%d: %s", e.pos, e.vals) return fmt.Sprintf("%d: %s", e.pos, e.vals)
} }
func newEvalCmd(expr string, start time.Time, line int) *evalCmd { func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{ return &evalCmd{
expr: expr, expr: expr,
start: start, start: start,
@ -403,6 +468,20 @@ func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
} }
} }
func newRangeEvalCmd(expr string, start, end time.Time, step time.Duration, line int) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
end: end,
step: step,
line: line,
isRange: true,
metrics: map[uint64]labels.Labels{},
expected: map[uint64]entry{},
}
}
func (ev *evalCmd) String() string { func (ev *evalCmd) String() string {
return "eval" return "eval"
} }
@ -425,14 +504,88 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
func (ev *evalCmd) compareResult(result parser.Value) error { func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) { switch val := result.(type) {
case Matrix: case Matrix:
return errors.New("received range result on instant evaluation") if ev.ordered {
return fmt.Errorf("expected ordered result, but query returned a matrix")
}
if err := assertMatrixSorted(val); err != nil {
return err
}
seen := map[uint64]bool{}
for _, s := range val {
hash := s.Metric.Hash()
if _, ok := ev.metrics[hash]; !ok {
return fmt.Errorf("unexpected metric %s in result, has %s", s.Metric, formatSeriesResult(s))
}
seen[hash] = true
exp := ev.expected[hash]
var expectedFloats []FPoint
var expectedHistograms []HPoint
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
if ts.After(ev.end) {
return fmt.Errorf("expected %v points for %s, but query time range cannot return this many points", len(exp.vals), ev.metrics[hash])
}
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
if e.Histogram != nil {
expectedHistograms = append(expectedHistograms, HPoint{T: t, H: e.Histogram})
} else if !e.Omitted {
expectedFloats = append(expectedFloats, FPoint{T: t, F: e.Value})
}
}
if len(expectedFloats) != len(s.Floats) || len(expectedHistograms) != len(s.Histograms) {
return fmt.Errorf("expected %v float points and %v histogram points for %s, but got %s", len(expectedFloats), len(expectedHistograms), ev.metrics[hash], formatSeriesResult(s))
}
for i, expected := range expectedFloats {
actual := s.Floats[i]
if expected.T != actual.T {
return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !almostEqual(actual.F, expected.F, defaultEpsilon) {
return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
}
}
for i, expected := range expectedHistograms {
actual := s.Histograms[i]
if expected.T != actual.T {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !actual.H.Equals(expected.H.Compact(0)) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s))
}
}
}
for hash := range ev.expected {
if !seen[hash] {
return fmt.Errorf("expected metric %s not found", ev.metrics[hash])
}
}
case Vector: case Vector:
seen := map[uint64]bool{} seen := map[uint64]bool{}
for pos, v := range val { for pos, v := range val {
fp := v.Metric.Hash() fp := v.Metric.Hash()
if _, ok := ev.metrics[fp]; !ok { if _, ok := ev.metrics[fp]; !ok {
return fmt.Errorf("unexpected metric %s in result", v.Metric) if v.H != nil {
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.H)
}
return fmt.Errorf("unexpected metric %s in result, has value %v", v.Metric, v.F)
} }
exp := ev.expected[fp] exp := ev.expected[fp]
if ev.ordered && exp.pos != pos+1 { if ev.ordered && exp.pos != pos+1 {
@ -440,7 +593,13 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
} }
exp0 := exp.vals[0] exp0 := exp.vals[0]
expH := exp0.Histogram expH := exp0.Histogram
if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) { if expH == nil && v.H != nil {
return fmt.Errorf("expected float value %v for %s but got histogram %s", exp0, v.Metric, HistogramTestExpression(v.H))
}
if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
}
if expH != nil && !expH.Compact(0).Equals(v.H) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H)) return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
} }
if !almostEqual(exp0.Value, v.F, defaultEpsilon) { if !almostEqual(exp0.Value, v.F, defaultEpsilon) {
@ -451,10 +610,6 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
} }
for fp, expVals := range ev.expected { for fp, expVals := range ev.expected {
if !seen[fp] { if !seen[fp] {
fmt.Println("vector result", len(val), ev.expr)
for _, ss := range val {
fmt.Println(" ", ss.Metric, ss.T, ss.F)
}
return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals) return fmt.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
} }
} }
@ -477,6 +632,21 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return nil return nil
} }
func formatSeriesResult(s Series) string {
floatPlural := "s"
histogramPlural := "s"
if len(s.Floats) == 1 {
floatPlural = ""
}
if len(s.Histograms) == 1 {
histogramPlural = ""
}
return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms)
}
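Grounded in the test expectations below: a series holding the single float sample 5 at t=0 renders as "1 float point [5 @[0]] and 0 histogram points []", with the plural "s" dropped for counts of one.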
// HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil. // HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil.
func HistogramTestExpression(h *histogram.FloatHistogram) string { func HistogramTestExpression(h *histogram.FloatHistogram) string {
if h != nil { if h != nil {
@ -561,7 +731,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
} }
// exec processes a single step of the test. // exec processes a single step of the test.
func (t *test) exec(tc testCommand, engine engineQuerier) error { func (t *test) exec(tc testCommand, engine QueryEngine) error {
switch cmd := tc.(type) { switch cmd := tc.(type) {
case *clearCmd: case *clearCmd:
t.clear() t.clear()
@ -578,74 +748,7 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
} }
case *evalCmd: case *evalCmd:
queries, err := atModifierTestCases(cmd.expr, cmd.start) return t.execEval(cmd, engine)
if err != nil {
return err
}
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
for _, iq := range queries {
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
if err != nil {
return err
}
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
continue
}
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
}
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return err
}
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
defer q.Close()
if cmd.ordered {
// Ordering isn't defined for range queries.
continue
}
mat := rangeRes.Value.(Matrix)
vec := make(Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
for _, point := range series.Floats {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
break
}
}
for _, point := range series.Histograms {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
break
}
}
}
if _, ok := res.Value.(Scalar); ok {
err = cmd.compareResult(Scalar{V: vec[0].F})
} else {
err = cmd.compareResult(vec)
}
if err != nil {
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
}
}
default: default:
panic("promql.Test.exec: unknown test command type") panic("promql.Test.exec: unknown test command type")
@ -653,6 +756,132 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
return nil return nil
} }
func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
if cmd.isRange {
return t.execRangeEval(cmd, engine)
}
return t.execInstantEval(cmd, engine)
}
func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
q, err := engine.NewRangeQuery(t.context, t.storage, nil, cmd.expr, cmd.start, cmd.end, cmd.step)
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
return nil
}
return fmt.Errorf("error evaluating query %q (line %d): %w", cmd.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
defer q.Close()
if err := cmd.compareResult(res.Value); err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err)
}
return nil
}
func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
queries, err := atModifierTestCases(cmd.expr, cmd.start)
if err != nil {
return err
}
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
for _, iq := range queries {
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
if err != nil {
return fmt.Errorf("error creating instant query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
continue
}
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
}
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
}
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
defer q.Close()
if cmd.ordered {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
continue
}
mat := rangeRes.Value.(Matrix)
if err := assertMatrixSorted(mat); err != nil {
return err
}
vec := make(Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
for _, point := range series.Floats {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
break
}
}
for _, point := range series.Histograms {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
break
}
}
}
if _, ok := res.Value.(Scalar); ok {
err = cmd.compareResult(Scalar{V: vec[0].F})
} else {
err = cmd.compareResult(vec)
}
if err != nil {
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
}
}
return nil
}
func assertMatrixSorted(m Matrix) error {
if len(m) <= 1 {
return nil
}
for i, s := range m[:len(m)-1] {
nextIndex := i + 1
nextMetric := m[nextIndex].Metric
if labels.Compare(s.Metric, nextMetric) > 0 {
return fmt.Errorf("matrix results should always be sorted by labels, but matrix is not sorted: series at index %v with labels %s sorts before series at index %v with labels %s", nextIndex, nextMetric, i, s.Metric)
}
}
return nil
}
// clear the current test storage of all inserted samples. // clear the current test storage of all inserted samples.
func (t *test) clear() { func (t *test) clear() {
if t.storage != nil { if t.storage != nil {
@ -704,8 +933,6 @@ func parseNumber(s string) (float64, error) {
// LazyLoader lazily loads samples into storage. // LazyLoader lazily loads samples into storage.
// This is specifically implemented for unit testing of rules. // This is specifically implemented for unit testing of rules.
type LazyLoader struct { type LazyLoader struct {
testutil.T
loadCmd *loadCmd loadCmd *loadCmd
storage storage.Storage storage storage.Storage
@ -727,13 +954,15 @@ type LazyLoaderOpts struct {
} }
// NewLazyLoader returns an initialized empty LazyLoader. // NewLazyLoader returns an initialized empty LazyLoader.
func NewLazyLoader(t testutil.T, input string, opts LazyLoaderOpts) (*LazyLoader, error) { func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
ll := &LazyLoader{ ll := &LazyLoader{
T: t,
opts: opts, opts: opts,
} }
err := ll.parse(input) err := ll.parse(input)
ll.clear() if err != nil {
return nil, err
}
err = ll.clear()
return ll, err return ll, err
} }
@ -761,15 +990,20 @@ func (ll *LazyLoader) parse(input string) error {
} }
// clear the current test storage of all inserted samples. // clear the current test storage of all inserted samples.
func (ll *LazyLoader) clear() { func (ll *LazyLoader) clear() error {
if ll.storage != nil { if ll.storage != nil {
err := ll.storage.Close() if err := ll.storage.Close(); err != nil {
require.NoError(ll.T, err, "Unexpected error while closing test storage.") return fmt.Errorf("closing test storage: %w", err)
}
} }
if ll.cancelCtx != nil { if ll.cancelCtx != nil {
ll.cancelCtx() ll.cancelCtx()
} }
ll.storage = teststorage.New(ll) var err error
ll.storage, err = teststorage.NewWithError()
if err != nil {
return err
}
opts := EngineOpts{ opts := EngineOpts{
Logger: nil, Logger: nil,
@ -783,6 +1017,7 @@ func (ll *LazyLoader) clear() {
ll.queryEngine = NewEngine(opts) ll.queryEngine = NewEngine(opts)
ll.context, ll.cancelCtx = context.WithCancel(context.Background()) ll.context, ll.cancelCtx = context.WithCancel(context.Background())
return nil
} }
// appendTill appends the defined time series to the storage till the given timestamp (in milliseconds). // appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
@ -836,8 +1071,7 @@ func (ll *LazyLoader) Storage() storage.Storage {
} }
// Close closes resources associated with the LazyLoader. // Close closes resources associated with the LazyLoader.
func (ll *LazyLoader) Close() { func (ll *LazyLoader) Close() error {
ll.cancelCtx() ll.cancelCtx()
err := ll.storage.Close() return ll.storage.Close()
require.NoError(ll.T, err, "Unexpected error while closing test storage.")
} }
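A minimal usage sketch of the reworked lifecycle inside a test (the load script is a hypothetical example): NewLazyLoader no longer takes a testing handle, setup failures now come back as an error, and Close returns the storage-shutdown error instead of asserting internally.

    ll, err := NewLazyLoader(`
    load 10s
      some_metric 1 2 3
    `, LazyLoaderOpts{})
    if err != nil {
        t.Fatalf("setting up LazyLoader: %v", err) // setup errors are now returned, not asserted
    }
    defer func() {
        if err := ll.Close(); err != nil { // Close now reports storage-shutdown errors
            t.Errorf("closing LazyLoader: %v", err)
        }
    }()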

View file

@ -110,7 +110,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
} }
for _, c := range cases { for _, c := range cases {
suite, err := NewLazyLoader(t, c.loadString, LazyLoaderOpts{}) suite, err := NewLazyLoader(c.loadString, LazyLoaderOpts{})
require.NoError(t, err) require.NoError(t, err)
defer suite.Close() defer suite.Close()
@ -156,3 +156,363 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
} }
} }
} }
func TestRunTest(t *testing.T) {
testData := `
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
`
testCases := map[string]struct {
input string
expectedError string
}{
"instant query with expected float result": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
`,
},
"instant query with unexpected float result": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 80
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 80 for {group="canary"} but got 70`,
},
"instant query with expected histogram result": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
eval instant at 0 testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
`,
},
"instant query with unexpected histogram result": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
eval instant at 0 testmetric
testmetric {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
`,
expectedError: `error in eval testmetric (line 5): expected {{schema:-1 count:1 sum:6 offset:1 buckets:[1]}} for {__name__="testmetric"} but got {{schema:-1 count:1 sum:4 offset:1 buckets:[1]}}`,
},
"instant query with float value returned when histogram expected": {
input: `
load 5m
testmetric 2
eval instant at 0 testmetric
testmetric {{}}
`,
expectedError: `error in eval testmetric (line 5): expected histogram {{}} for {__name__="testmetric"} but got float value 2`,
},
"instant query with histogram returned when float expected": {
input: `
load 5m
testmetric {{}}
eval instant at 0 testmetric
testmetric 2
`,
expectedError: `error in eval testmetric (line 5): expected float value 2.000000 for {__name__="testmetric"} but got histogram {{}}`,
},
"instant query, but result has an unexpected series with a float value": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result, has value 70`,
},
"instant query, but result has an unexpected series with a histogram value": {
input: `
load 5m
testmetric {{}}
eval instant at 5m testmetric
`,
expectedError: `error in eval testmetric (line 5): unexpected metric {__name__="testmetric"} in result, has value {count:0, sum:0}`,
},
"instant query, but result is missing a series": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
{group="test"} 100
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} with 3: [100.000000] not found`,
},
"instant query expected to fail, and query fails": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
`,
},
"instant query expected to fail, but query succeeds": {
input: `eval_fail instant at 0s vector(0)`,
expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
},
"instant query with results expected to match provided order, and result is in expected order": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
`,
},
"instant query with results expected to match provided order, but result is out of order": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 8): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`,
},
"instant query with results expected to match provided order, but result has an unexpected series": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 8): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result, has value 400`,
},
"instant query with invalid timestamp": {
input: `eval instant at abc123 vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid timestamp definition "abc123": not a valid duration string: "abc123"`,
},
"range query with expected result": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 70 140
`,
},
"range query with unexpected float value": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 80 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected float value at index 1 (t=300000) for {group="canary"} to be 80, but got 70 (result has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points [])`,
},
"range query with expected histogram values": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
eval range from 0 to 10m step 5m testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
`,
},
"range query with unexpected histogram value": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
eval range from 0 to 10m step 5m testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:7 count:1 buckets:[1] offset:1}} {{schema:-1 sum:8 count:1 buckets:[1] offset:1}}
`,
expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {count:1, sum:7, (1,4]:1}, but got {count:1, sum:5, (1,4]:1} (result has 0 float points [] and 3 histogram points [{count:1, sum:4, (1,4]:1} @[0] {count:1, sum:5, (1,4]:1} @[300000] {count:1, sum:6, (1,4]:1} @[600000]])`,
},
"range query with too many points for query time range": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60 90
{group="canary"} 0 70 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 4 points for {group="production"}, but query time range cannot return this many points`,
},
"range query with missing point in result": {
input: `
load 5m
testmetric 5
eval range from 0 to 6m step 6m testmetric
testmetric 5 10
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 1 float point [5 @[0]] and 0 histogram points []`,
},
"range query with extra point in result": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30
{group="canary"} 0 70 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 2 float points and 0 histogram points for {group="production"}, but got 3 float points [0 @[0] 30 @[300000] 60 @[600000]] and 0 histogram points []`,
},
"range query, but result has an unexpected series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result, has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points []`,
},
"range query, but result is missing a series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 70 140
{group="test"} 0 100 200
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} not found`,
},
"range query expected to fail, and query fails": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
`,
},
"range query expected to fail, but query succeeds": {
input: `eval_fail range from 0 to 10m step 5m vector(0)`,
expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
},
"range query with from and to timestamps in wrong order": {
input: `eval range from 10m to 9m step 5m vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid test definition, end timestamp (9m) is before start timestamp (10m)`,
},
"range query with sparse output": {
input: `
load 6m
testmetric 1 _ 3
eval range from 0 to 18m step 6m testmetric
testmetric 1 _ 3
`,
},
"range query with float value returned when no value expected": {
input: `
load 6m
testmetric 1 2 3
eval range from 0 to 18m step 6m testmetric
testmetric 1 _ 3
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 3 float points [1 @[0] 2 @[360000] 3 @[720000]] and 0 histogram points []`,
},
"range query with float value returned when histogram expected": {
input: `
load 5m
testmetric 2 3
eval range from 0 to 5m step 5m testmetric
testmetric {{}} {{}}
`,
expectedError: `error in eval testmetric (line 5): expected 0 float points and 2 histogram points for {__name__="testmetric"}, but got 2 float points [2 @[0] 3 @[300000]] and 0 histogram points []`,
},
"range query with histogram returned when float expected": {
input: `
load 5m
testmetric {{}} {{}}
eval range from 0 to 5m step 5m testmetric
testmetric 2 3
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{count:0, sum:0} @[0] {count:0, sum:0} @[300000]]`,
},
"range query with expected mixed results": {
input: `
load 6m
testmetric{group="a"} {{}} _ _
testmetric{group="b"} _ _ 3
eval range from 0 to 12m step 6m sum(testmetric)
{} {{}} _ 3
`,
},
"range query with mixed results and incorrect values": {
input: `
load 5m
testmetric 3 {{}}
eval range from 0 to 5m step 5m testmetric
testmetric {{}} 3
`,
expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`,
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, newTestEngine())
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, testCase.expectedError)
}
})
}
}
func TestAssertMatrixSorted(t *testing.T) {
testCases := map[string]struct {
matrix Matrix
expectedError string
}{
"empty matrix": {
matrix: Matrix{},
},
"matrix with one series": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
},
},
"matrix with two series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
},
},
"matrix with two series, series in reverse order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_1")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 1 with labels {the_label="value_1"} sorts before series at index 0 with labels {the_label="value_2"}`,
},
"matrix with three series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
},
},
"matrix with three series, series not in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 2 with labels {the_label="value_2"} sorts before series at index 1 with labels {the_label="value_3"}`,
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := assertMatrixSorted(testCase.matrix)
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, testCase.expectedError)
}
})
}
}

View file

@ -546,13 +546,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
} }
} }
if numOutOfOrder > 0 { if numOutOfOrder > 0 {
level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder) level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder)
} }
if numTooOld > 0 { if numTooOld > 0 {
level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld) level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld)
} }
if numDuplicates > 0 { if numDuplicates > 0 {
level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates) level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates)
} }
for metric, lset := range g.seriesInPreviousEval[i] { for metric, lset := range g.seriesInPreviousEval[i] {

View file

@ -43,7 +43,7 @@ type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector,
// EngineQueryFunc returns a new query function that executes instant queries against // EngineQueryFunc returns a new query function that executes instant queries against
// the given engine. // the given engine.
// It converts scalar into vector results. // It converts scalar into vector results.
func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc { func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable) QueryFunc {
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
q, err := engine.NewInstantQuery(ctx, q, nil, qs, t) q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
if err != nil { if err != nil {

View file

@ -129,6 +129,11 @@ func (m *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) error {
} }
} }
// UnregisterMetrics unregisters manager metrics.
func (m *Manager) UnregisterMetrics() {
m.metrics.Unregister()
}
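Together with the scrapeMetrics.Unregister helper added further down, this makes it possible to tear down one scrape manager and construct a fresh one against the same Prometheus registry without duplicate-registration errors, as exercised by TestUnregisterMetrics below.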
func (m *Manager) reloader() { func (m *Manager) reloader() {
reloadIntervalDuration := m.opts.DiscoveryReloadInterval reloadIntervalDuration := m.opts.DiscoveryReloadInterval
if reloadIntervalDuration < model.Duration(5*time.Second) { if reloadIntervalDuration < model.Duration(5*time.Second) {

View file

@ -857,3 +857,16 @@ func getResultFloats(app *collectResultAppender, expectedMetricName string) (res
} }
return result return result
} }
func TestUnregisterMetrics(t *testing.T) {
reg := prometheus.NewRegistry()
// Check that all metrics can be unregistered, allowing a second manager to be created.
for i := 0; i < 2; i++ {
opts := Options{}
manager, err := NewManager(&opts, nil, nil, reg)
require.NotNil(t, manager)
require.NoError(t, err)
// Unregister all metrics.
manager.UnregisterMetrics()
}
}

View file

@ -20,6 +20,7 @@ import (
) )
type scrapeMetrics struct { type scrapeMetrics struct {
reg prometheus.Registerer
// Used by Manager. // Used by Manager.
targetMetadataCache *MetadataMetricsCollector targetMetadataCache *MetadataMetricsCollector
targetScrapePools prometheus.Counter targetScrapePools prometheus.Counter
@ -54,7 +55,7 @@ type scrapeMetrics struct {
} }
func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) { func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
sm := &scrapeMetrics{} sm := &scrapeMetrics{reg: reg}
// Manager metrics. // Manager metrics.
sm.targetMetadataCache = &MetadataMetricsCollector{ sm.targetMetadataCache = &MetadataMetricsCollector{
@ -260,6 +261,32 @@ func (sm *scrapeMetrics) setTargetMetadataCacheGatherer(gatherer TargetsGatherer
sm.targetMetadataCache.TargetsGatherer = gatherer sm.targetMetadataCache.TargetsGatherer = gatherer
} }
// Unregister unregisters all metrics.
func (sm *scrapeMetrics) Unregister() {
sm.reg.Unregister(sm.targetMetadataCache)
sm.reg.Unregister(sm.targetScrapePools)
sm.reg.Unregister(sm.targetScrapePoolsFailed)
sm.reg.Unregister(sm.targetReloadIntervalLength)
sm.reg.Unregister(sm.targetScrapePoolReloads)
sm.reg.Unregister(sm.targetScrapePoolReloadsFailed)
sm.reg.Unregister(sm.targetSyncIntervalLength)
sm.reg.Unregister(sm.targetScrapePoolSyncsCounter)
sm.reg.Unregister(sm.targetScrapePoolExceededTargetLimit)
sm.reg.Unregister(sm.targetScrapePoolTargetLimit)
sm.reg.Unregister(sm.targetScrapePoolTargetsAdded)
sm.reg.Unregister(sm.targetSyncFailed)
sm.reg.Unregister(sm.targetScrapeExceededBodySizeLimit)
sm.reg.Unregister(sm.targetScrapeCacheFlushForced)
sm.reg.Unregister(sm.targetIntervalLength)
sm.reg.Unregister(sm.targetScrapeSampleLimit)
sm.reg.Unregister(sm.targetScrapeSampleDuplicate)
sm.reg.Unregister(sm.targetScrapeSampleOutOfOrder)
sm.reg.Unregister(sm.targetScrapeSampleOutOfBounds)
sm.reg.Unregister(sm.targetScrapeExemplarOutOfOrder)
sm.reg.Unregister(sm.targetScrapePoolExceededLabelLimits)
sm.reg.Unregister(sm.targetScrapeNativeHistogramBucketLimit)
}
type TargetsGatherer interface { type TargetsGatherer interface {
TargetsActive() map[string][]*Target TargetsActive() map[string][]*Target
} }

View file

@ -726,7 +726,7 @@ var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) {
if s.req == nil { if s.req == nil {
req, err := http.NewRequest("GET", s.URL().String(), nil) req, err := http.NewRequest(http.MethodGet, s.URL().String(), nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -956,13 +956,14 @@ func (c *scrapeCache) iterDone(flushCache bool) {
} }
} }
func (c *scrapeCache) get(met []byte) (*cacheEntry, bool) { func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) {
e, ok := c.series[string(met)] e, ok := c.series[string(met)]
if !ok { if !ok {
return nil, false return nil, false, false
} }
alreadyScraped := e.lastIter == c.iter
e.lastIter = c.iter e.lastIter = c.iter
return e, true return e, true, alreadyScraped
} }
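The new third return value reports whether the cache entry was already seen in the current scrape iteration. The append loop below uses it to turn a repeated sample for the same series within one scrape into storage.ErrDuplicateSampleForTimestamp, which feeds the prometheus_target_scrapes_sample_duplicate_timestamp_total counter checked in TestScrapeLoopSeriesAddedDuplicates.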
func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) { func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
@ -1568,7 +1569,7 @@ loop:
if sl.cache.getDropped(met) { if sl.cache.getDropped(met) {
continue continue
} }
ce, ok := sl.cache.get(met) ce, ok, seriesAlreadyScraped := sl.cache.get(met)
var ( var (
ref storage.SeriesRef ref storage.SeriesRef
hash uint64 hash uint64
@ -1577,6 +1578,7 @@ loop:
if ok { if ok {
ref = ce.ref ref = ce.ref
lset = ce.lset lset = ce.lset
hash = ce.hash
// Update metadata only if it changed in the current iteration. // Update metadata only if it changed in the current iteration.
updateMetadata(lset, false) updateMetadata(lset, false)
@ -1613,25 +1615,36 @@ loop:
updateMetadata(lset, true) updateMetadata(lset, true)
} }
if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { if seriesAlreadyScraped {
ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) err = storage.ErrDuplicateSampleForTimestamp
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. } else {
// CT is an experimental feature. For now, we don't need to fail the if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil {
// scrape on errors updating the created timestamp, log debug. ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
// CT is an experimental feature. For now, we don't need to fail the
// scrape on errors updating the created timestamp, log debug.
level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err)
}
}
if isHistogram {
if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else {
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
}
} else {
ref, err = app.Append(ref, lset, t, val)
} }
} }
if isHistogram { if err == nil {
if h != nil { if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil) sl.cache.trackStaleness(ce.hash, ce.lset)
} else {
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
} }
} else {
ref, err = app.Append(ref, lset, t, val)
} }
sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
if err != nil { if err != nil {
if !errors.Is(err, storage.ErrNotFound) { if !errors.Is(err, storage.ErrNotFound) {
level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
@ -1652,6 +1665,8 @@ loop:
// Increment added even if there's an error so we correctly report the // Increment added even if there's an error so we correctly report the
// number of samples remaining after relabeling. // number of samples remaining after relabeling.
// We still report duplicated samples here since this number should be the exact number
// of time series exposed on a scrape after relabelling.
added++ added++
exemplars = exemplars[:0] // Reset and reuse the exemplar slice. exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
@ -1746,12 +1761,9 @@ loop:
// Adds samples to the appender, checking the error, and then returns the # of samples added, // Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample or bucket limit errors. // whether the caller should continue to process more samples, and any sample or bucket limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
switch { switch {
case err == nil: case err == nil:
if (tp == nil || sl.trackTimestampsStaleness) && ce != nil {
sl.cache.trackStaleness(ce.hash, ce.lset)
}
return true, nil return true, nil
case errors.Is(err, storage.ErrNotFound): case errors.Is(err, storage.ErrNotFound):
return false, storage.ErrNotFound return false, storage.ErrNotFound
@ -1874,7 +1886,7 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
} }
func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error { func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error {
ce, ok := sl.cache.get(s) ce, ok, _ := sl.cache.get(s)
var ref storage.SeriesRef var ref storage.SeriesRef
var lset labels.Labels var lset labels.Labels
if ok { if ok {

View file

@ -1069,6 +1069,7 @@ func makeTestMetrics(n int) []byte {
fmt.Fprintf(&sb, "# HELP metric_a help text\n") fmt.Fprintf(&sb, "# HELP metric_a help text\n")
fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100) fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
} }
fmt.Fprintf(&sb, "# EOF\n")
return sb.Bytes() return sb.Bytes()
} }
@ -2636,6 +2637,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
_, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{}) _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{})
require.Error(t, err) require.Error(t, err)
require.NoError(t, slApp.Rollback()) require.NoError(t, slApp.Rollback())
// We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them,
// which would cause ErrDuplicateSampleForTimestamp errors on the next append.
sl.cache.iterDone(true)
q, err := s.Querier(time.Time{}.UnixNano(), 0) q, err := s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err) require.NoError(t, err)
@ -2972,7 +2976,7 @@ func TestReuseCacheRace(t *testing.T) {
func TestCheckAddError(t *testing.T) { func TestCheckAddError(t *testing.T) {
var appErrs appendErrors var appErrs appendErrors
sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)} sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)}
sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
require.Equal(t, 1, appErrs.numOutOfOrder) require.Equal(t, 1, appErrs.numOutOfOrder)
} }
@ -3601,6 +3605,34 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
} }
} }
// When a scrape contains multiple samples for the same time series, we should increment the
// prometheus_target_scrapes_sample_duplicate_timestamp_total metric.
func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
ctx, sl := simpleTestScrapeLoop(t)
slApp := sl.appender(ctx)
total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "", time.Time{})
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 1, seriesAdded)
slApp = sl.appender(ctx)
total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{})
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded)
metric := dto.Metric{}
err = sl.metrics.targetScrapeSampleDuplicate.Write(&metric)
require.NoError(t, err)
value := metric.GetCounter().GetValue()
require.Equal(t, 4.0, value)
}
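The expected counter value of 4 follows from the two appends above: each scrape body carries three samples for the same series at the same timestamp, so after the first sample lands the remaining two are flagged as duplicates, giving 2 increments per scrape and 4 in total.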
// This tests running a full scrape loop and checking that the scrape option // This tests running a full scrape loop and checking that the scrape option
// `native_histogram_min_bucket_factor` is used correctly. // `native_histogram_min_bucket_factor` is used correctly.
func TestNativeHistogramMaxSchemaSet(t *testing.T) { func TestNativeHistogramMaxSchemaSet(t *testing.T) {

View file

@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- name: install Go - name: install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with: with:
@ -35,4 +35,4 @@ jobs:
- name: Lint - name: Lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
with: with:
version: v1.55.2 version: v1.56.2

View file

@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
fi fi
# List of files that should be synced. # List of files that should be synced.
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml" SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml"
# Go to the root of the repo # Go to the root of the repo
cd "$(git rev-parse --show-cdup)" || exit 1 cd "$(git rev-parse --show-cdup)" || exit 1
@ -99,6 +99,15 @@ check_go() {
curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/go.mod" curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/go.mod"
} }
check_docker() {
local org_repo
local default_branch
org_repo="$1"
default_branch="$2"
curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/Dockerfile"
}
process_repo() { process_repo() {
local org_repo local org_repo
local default_branch local default_branch
@ -119,6 +128,10 @@ process_repo() {
echo "${org_repo} is not Go, skipping golangci-lint.yml." echo "${org_repo} is not Go, skipping golangci-lint.yml."
continue continue
fi fi
if [[ "${source_file}" == '.github/workflows/container_description.yml' ]] && ! check_docker "${org_repo}" "${default_branch}" ; then
echo "${org_repo} has no Dockerfile, skipping container_description.yml."
continue
fi
if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then
echo "LICENSE in ${org_repo} is not apache, skipping." echo "LICENSE in ${org_repo} is not apache, skipping."
continue continue
@ -131,7 +144,7 @@ process_repo() {
if [[ -z "${target_file}" ]]; then if [[ -z "${target_file}" ]]; then
echo "${target_filename} doesn't exist in ${org_repo}" echo "${target_filename} doesn't exist in ${org_repo}"
case "${source_file}" in case "${source_file}" in
CODE_OF_CONDUCT.md | SECURITY.md) CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/container_description.yml)
echo "${source_file} missing in ${org_repo}, force updating." echo "${source_file} missing in ${org_repo}, force updating."
needs_update+=("${source_file}") needs_update+=("${source_file}")
;; ;;

View file

@ -357,12 +357,12 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
var p ChunkQuerier var p ChunkQuerier
if tc.primaryChkQuerierSeries != nil { if tc.primaryChkQuerierSeries != nil {
p = &mockChunkQurier{toReturn: tc.primaryChkQuerierSeries} p = &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries}
} }
var qs []ChunkQuerier var qs []ChunkQuerier
for _, in := range tc.chkQuerierSeries { for _, in := range tc.chkQuerierSeries {
qs = append(qs, &mockChunkQurier{toReturn: in}) qs = append(qs, &mockChunkQuerier{toReturn: in})
} }
qs = append(qs, tc.extraQueriers...) qs = append(qs, tc.extraQueriers...)
@ -934,7 +934,7 @@ func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints,
return NewMockSeriesSet(cpy...) return NewMockSeriesSet(cpy...)
} }
type mockChunkQurier struct { type mockChunkQuerier struct {
LabelQuerier LabelQuerier
toReturn []ChunkSeries toReturn []ChunkSeries
@ -948,7 +948,7 @@ func (a chunkSeriesByLabel) Less(i, j int) bool {
return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 return labels.Compare(a[i].Labels(), a[j].Labels()) < 0
} }
func (m *mockChunkQurier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet { func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet {
cpy := make([]ChunkSeries, len(m.toReturn)) cpy := make([]ChunkSeries, len(m.toReturn))
copy(cpy, m.toReturn) copy(cpy, m.toReturn)
if sortSeries { if sortSeries {

View file

@ -61,6 +61,12 @@ type OAuthConfig struct {
TenantID string `yaml:"tenant_id,omitempty"` TenantID string `yaml:"tenant_id,omitempty"`
} }
// SDKConfig is used to store azure SDK config values.
type SDKConfig struct {
// TenantID is the tenantId of the azure active directory application that is being used to authenticate.
TenantID string `yaml:"tenant_id,omitempty"`
}
// AzureADConfig is used to store the config values. // AzureADConfig is used to store the config values.
type AzureADConfig struct { //nolint:revive // exported. type AzureADConfig struct { //nolint:revive // exported.
// ManagedIdentity is the managed identity that is being used to authenticate. // ManagedIdentity is the managed identity that is being used to authenticate.
@ -69,6 +75,9 @@ type AzureADConfig struct { //nolint:revive // exported.
// OAuth is the oauth config that is being used to authenticate. // OAuth is the oauth config that is being used to authenticate.
OAuth *OAuthConfig `yaml:"oauth,omitempty"` OAuth *OAuthConfig `yaml:"oauth,omitempty"`
// SDK is the SDK config that is being used to authenticate.
SDK *SDKConfig `yaml:"sdk,omitempty"`
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina. // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
Cloud string `yaml:"cloud,omitempty"` Cloud string `yaml:"cloud,omitempty"`
} }
@@ -102,14 +111,22 @@ func (c *AzureADConfig) Validate() error {
 		return fmt.Errorf("must provide a cloud in the Azure AD config")
 	}
-	if c.ManagedIdentity == nil && c.OAuth == nil {
-		return fmt.Errorf("must provide an Azure Managed Identity or Azure OAuth in the Azure AD config")
+	if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil {
+		return fmt.Errorf("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
 	}
 	if c.ManagedIdentity != nil && c.OAuth != nil {
 		return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
 	}
+	if c.ManagedIdentity != nil && c.SDK != nil {
+		return fmt.Errorf("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
+	}
+	if c.OAuth != nil && c.SDK != nil {
+		return fmt.Errorf("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
+	}
 	if c.ManagedIdentity != nil {
 		if c.ManagedIdentity.ClientID == "" {
 			return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
@@ -143,6 +160,17 @@ func (c *AzureADConfig) Validate() error {
 		}
 	}
+	if c.SDK != nil {
+		if c.SDK.TenantID != "" {
+			ok, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID)
+			if err != nil || !ok {
+				return fmt.Errorf("the provided Azure SDK tenant_id is invalid")
+			}
+		}
+	}
 	return nil
 }
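Note that the err returned by regexp.MatchString is only non-nil when the pattern itself fails to compile; validating the tenant ID requires checking the boolean match as well, as the block above does, and the failure should name the SDK config rather than OAuth. A standard-library-only illustration of why the err check alone is not enough:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	bad := "not a tenant!" // '!' and the spaces fall outside ^[0-9a-zA-Z-.]+$
	ok, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", bad)
	// The pattern is valid, so err is nil even though the input is rejected.
	fmt.Println(ok, err) // false <nil>
}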
@@ -225,6 +253,16 @@ func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
 		}
 	}
+	if cfg.SDK != nil {
+		sdkConfig := &SDKConfig{
+			TenantID: cfg.SDK.TenantID,
+		}
+		cred, err = newSDKTokenCredential(clientOpts, sdkConfig)
+		if err != nil {
+			return nil, err
+		}
+	}
 	return cred, nil
 }
@@ -241,6 +279,12 @@ func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAut
 	return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts)
 }
+// newSDKTokenCredential returns a new SDK token credential.
+func newSDKTokenCredential(clientOpts *azcore.ClientOptions, sdkConfig *SDKConfig) (azcore.TokenCredential, error) {
+	opts := &azidentity.DefaultAzureCredentialOptions{ClientOptions: *clientOpts, TenantID: sdkConfig.TenantID}
+	return azidentity.NewDefaultAzureCredential(opts)
+}
 // newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of
 // refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests.
 func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) {
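newSDKTokenCredential hands the whole decision to azidentity's DefaultAzureCredential, which walks the SDK's standard chain (environment variables, workload identity, managed identity, Azure CLI, ...). A self-contained sketch of fetching a token through that chain; the tenant ID and the Azure Monitor scope below are illustrative assumptions, not values taken from this code:

package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential tries each credential source in turn.
	cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{
		ClientOptions: azcore.ClientOptions{},
		TenantID:      "00000000-a12b-3cd4-e56f-000000000000", // hypothetical tenant
	})
	if err != nil {
		panic(err)
	}
	// The scope is an assumption here; the token provider derives the real
	// one from the configured cloud.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://monitor.azure.com//.default"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires at:", tok.ExpiresOn)
}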


@@ -39,7 +39,7 @@ const (
 	testTokenString = "testTokenString"
 )
-var testTokenExpiry = time.Now().Add(5 * time.Second)
+func testTokenExpiry() time.Time { return time.Now().Add(5 * time.Second) }
 type AzureAdTestSuite struct {
 	suite.Suite
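The switch from a var to a function matters because a package-level var is evaluated once, at package initialization, so a five-second expiry can already be in the past by the time a slow test reads it; a function returns a fresh deadline on every call. A self-contained sketch of the difference:

package main

import (
	"fmt"
	"time"
)

// Evaluated exactly once, when the package initializes.
var expiryVar = time.Now().Add(5 * time.Second)

// Evaluated on every call.
func expiryFunc() time.Time { return time.Now().Add(5 * time.Second) }

func main() {
	time.Sleep(6 * time.Second)
	fmt.Println(time.Now().After(expiryVar))    // true: the var already expired
	fmt.Println(time.Now().After(expiryFunc())) // false: still five seconds away
}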
@@ -94,7 +94,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
 	testToken := &azcore.AccessToken{
 		Token:     testTokenString,
-		ExpiresOn: testTokenExpiry,
+		ExpiresOn: testTokenExpiry(),
 	}
 	ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
@@ -145,7 +145,7 @@ func TestAzureAdConfig(t *testing.T) {
 		// Missing managedidentity or oauth field.
 		{
 			filename: "testdata/azuread_bad_configmissing.yaml",
-			err:      "must provide an Azure Managed Identity or Azure OAuth in the Azure AD config",
+			err:      "must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config",
 		},
 		// Invalid managedidentity client id.
 		{
@@ -162,6 +162,11 @@ func TestAzureAdConfig(t *testing.T) {
 			filename: "testdata/azuread_bad_twoconfig.yaml",
 			err:      "cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config",
 		},
+		// Invalid config when both sdk and oauth are provided.
+		{
+			filename: "testdata/azuread_bad_oauthsdkconfig.yaml",
+			err:      "cannot provide both Azure OAuth and Azure SDK in the Azure AD config",
+		},
 		// Valid config with missing optional cloud field.
 		{
 			filename: "testdata/azuread_good_cloudmissing.yaml",
@@ -174,6 +179,10 @@ func TestAzureAdConfig(t *testing.T) {
 		{
 			filename: "testdata/azuread_good_oauth.yaml",
 		},
+		// Valid SDK config.
+		{
+			filename: "testdata/azuread_good_sdk.yaml",
+		},
 	}
 	for _, c := range cases {
 		_, err := loadAzureAdConfig(c.filename)
@@ -232,6 +241,16 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
 			},
 			err: "Cloud is not specified or is incorrect: ",
 		},
+		// Invalid tokenProvider for SDK.
+		{
+			cfg: &AzureADConfig{
+				Cloud: "PublicAzure",
+				SDK: &SDKConfig{
+					TenantID: dummyTenantID,
+				},
+			},
+			err: "Cloud is not specified or is incorrect: ",
+		},
 		// Valid tokenProvider for managedidentity.
 		{
 			cfg: &AzureADConfig{
@@ -252,6 +271,15 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
 				},
 			},
 		},
+		// Valid tokenProvider for SDK.
+		{
+			cfg: &AzureADConfig{
+				Cloud: "AzurePublic",
+				SDK: &SDKConfig{
+					TenantID: dummyTenantID,
+				},
+			},
+		},
 	}
 	mockGetTokenCallCounter := 1
 	for _, c := range cases {
@@ -264,11 +292,11 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
 		} else {
 			testToken := &azcore.AccessToken{
 				Token:     testTokenString,
-				ExpiresOn: testTokenExpiry,
+				ExpiresOn: testTokenExpiry(),
 			}
 			s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
-				On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil)
+				On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil).Once()
 			actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)
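The trailing .Once() matters for testify's expectation matching: each .Once() expectation is consumed by a single call, and calls are matched against expectations in declaration order, so the first GetToken returns the short-lived testToken and the second returns a fresh one. A generic sketch of that behaviour (the fakeCred type is hypothetical, not part of this test suite):

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type fakeCred struct{ mock.Mock }

func (f *fakeCred) Get() string {
	args := f.Called()
	return args.String(0)
}

func main() {
	f := &fakeCred{}
	// Each expectation is consumed once, in declaration order.
	f.On("Get").Return("first").Once().
		On("Get").Return("second").Once()
	fmt.Println(f.Get(), f.Get()) // first second
}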

testdata/azuread_bad_oauthsdkconfig.yaml

@@ -0,0 +1,7 @@
+cloud: AzurePublic
+oauth:
+  client_id: 00000000-0000-0000-0000-000000000000
+  client_secret: Cl1ent$ecret!
+  tenant_id: 00000000-a12b-3cd4-e56f-000000000000
+sdk:
+  tenant_id: 00000000-a12b-3cd4-e56f-000000000000

testdata/azuread_good_sdk.yaml

@@ -0,0 +1,3 @@
+cloud: AzurePublic
+sdk:
+  tenant_id: 00000000-a12b-3cd4-e56f-000000000000


@@ -199,7 +199,7 @@ type RecoverableError struct {
 // Store sends a batch of samples to the HTTP endpoint, the request is the proto marshalled
 // and encoded bytes from codec.go.
 func (c *Client) Store(ctx context.Context, req []byte, attempt int) error {
-	httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(req))
+	httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(req))
 	if err != nil {
 		// Errors from NewRequest are from unparsable URLs, so are not
 		// recoverable.
@@ -290,7 +290,7 @@ func (c *Client) Read(ctx context.Context, query *prompb.Query) (*prompb.QueryRe
 	}
 	compressed := snappy.Encode(nil, data)
-	httpReq, err := http.NewRequest("POST", c.urlString, bytes.NewReader(compressed))
+	httpReq, err := http.NewRequest(http.MethodPost, c.urlString, bytes.NewReader(compressed))
 	if err != nil {
 		return nil, fmt.Errorf("unable to create request: %w", err)
 	}


@@ -3,7 +3,6 @@
 The files in `prometheus/` and `prometheusremotewrite/` are copied from the OpenTelemetry Project[^1].
 This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus`, and a cyclic dependency would be created. This is just a temporary solution; the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`.
-We don't copy in `./prometheus` through this script because that package imports a collector specific featuregate package we don't want to import. The featuregate package is being removed now, and in the future we will copy this folder too.
 Updating the dependency is a multi-step process:
 1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib)
@@ -20,4 +19,4 @@ This means if we depend on the upstream packages directly, we will never able to
 When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK; please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then run the copy
 script again to keep things updated.
 [^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite


@@ -3,7 +3,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
-package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
+package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
 import (
 	"strings"


@@ -3,7 +3,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
-package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
+package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
 import (
 	"strings"


@@ -3,7 +3,7 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
-package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
+package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
 import "strings"


@@ -23,5 +23,5 @@ case $(sed --help 2>&1) in
   *) set sed -i '';;
 esac
-"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go
+"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go ./prometheus/*.go
 "$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go


@@ -202,34 +202,16 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
 			return err
 		}
-		querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
-		if err != nil {
+		chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers)
+		if err := chunks.Err(); err != nil {
 			return err
 		}
-		defer func() {
-			if err := querier.Close(); err != nil {
-				level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
-			}
-		}()
-		var hints *storage.SelectHints
-		if query.Hints != nil {
-			hints = &storage.SelectHints{
-				Start:    query.Hints.StartMs,
-				End:      query.Hints.EndMs,
-				Step:     query.Hints.StepMs,
-				Func:     query.Hints.Func,
-				Grouping: query.Hints.Grouping,
-				Range:    query.Hints.RangeMs,
-				By:       query.Hints.By,
-			}
-		}
 		ws, err := StreamChunkedReadResponses(
 			NewChunkedWriter(w, f),
 			int64(i),
 			// The streaming API has to provide the series sorted.
-			querier.Select(ctx, true, hints, filteredMatchers...),
+			chunks,
 			sortedExternalLabels,
 			h.remoteReadMaxBytesInFrame,
 			h.marshalPool,
@@ -254,6 +236,35 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
 	}
 }
+// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet,
+// encapsulating the operation in its own function to ensure timely release of
+// the querier resources.
+func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet {
+	querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
+	if err != nil {
+		return storage.ErrChunkSeriesSet(err)
+	}
+	defer func() {
+		if err := querier.Close(); err != nil {
+			level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
+		}
+	}()
+	var hints *storage.SelectHints
+	if query.Hints != nil {
+		hints = &storage.SelectHints{
+			Start:    query.Hints.StartMs,
+			End:      query.Hints.EndMs,
+			Step:     query.Hints.StepMs,
+			Func:     query.Hints.Func,
+			Grouping: query.Hints.Grouping,
+			Range:    query.Hints.RangeMs,
+			By:       query.Hints.By,
+		}
+	}
+	return querier.Select(ctx, true, hints, filteredMatchers...)
+}
 // filterExtLabelsFromMatchers change equality matchers which match external labels
 // to a matcher that looks for an empty label,
 // as that label should not be present in the storage.
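The early-return path relies on storage.ErrChunkSeriesSet, which wraps an error as a series set whose Next is immediately false and whose Err returns the wrapped error, so the caller above only needs the single chunks.Err() check. A minimal sketch of that contract:

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

func main() {
	set := storage.ErrChunkSeriesSet(errors.New("boom"))
	for set.Next() {
		// Never entered: an error set yields no series.
	}
	fmt.Println(set.Err()) // "boom" surfaces where the handler checks it
}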


@@ -75,7 +75,7 @@ func TestSampledReadEndpoint(t *testing.T) {
 	require.NoError(t, err)
 	compressed := snappy.Encode(nil, data)
-	request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
+	request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed))
 	require.NoError(t, err)
 	recorder := httptest.NewRecorder()
@@ -170,7 +170,7 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		compressed := snappy.Encode(nil, data)
-		request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
+		request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed))
 		require.NoError(b, err)
 		recorder := httptest.NewRecorder()
@@ -268,7 +268,7 @@ func TestStreamReadEndpoint(t *testing.T) {
 	require.NoError(t, err)
 	compressed := snappy.Encode(nil, data)
-	request, err := http.NewRequest("POST", "", bytes.NewBuffer(compressed))
+	request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed))
 	require.NoError(t, err)
 	recorder := httptest.NewRecorder()

Some files were not shown because too many files have changed in this diff.