Merge branch 'main' into merge-2.51-into-main

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Bryan Boreham 2024-03-28 10:00:25 +00:00 committed by GitHub
commit e1a5886c88
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
77 changed files with 3774 additions and 2694 deletions

.github/CODEOWNERS
View file

@@ -1,7 +1,7 @@
 /web/ui @juliusv
 /web/ui/module @juliusv @nexucis
 /storage/remote @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @gouthamve @jesusvazquez
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez
 /discovery/kubernetes @brancz
 /tsdb @jesusvazquez
 /promql @roidelapluie

View file

@@ -6,11 +6,11 @@ updates:
 interval: "monthly"
 groups:
 k8s.io:
 patterns:
 - "k8s.io/*"
 go.opentelemetry.io:
 patterns:
 - "go.opentelemetry.io/*"
 - package-ecosystem: "gomod"
 directory: "/documentation/examples/remote_storage"
 schedule:

View file

@@ -13,7 +13,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-- uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
 with:
 github_token: ${{ secrets.GITHUB_TOKEN }}
 - uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0

View file

@@ -13,7 +13,7 @@ jobs:
 if: github.repository_owner == 'prometheus'
 steps:
 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-- uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
 with:
 github_token: ${{ secrets.GITHUB_TOKEN }}
 - uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0

View file

@@ -45,7 +45,8 @@ jobs:
 steps:
 - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
 - run: make build
-- run: make test GO_ONLY=1
+# Don't run NPM build; don't run race-detector.
+- run: make test GO_ONLY=1 test-flags=""
 test_ui:
 name: UI tests
@@ -157,11 +158,11 @@ jobs:
 run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
 if: github.repository == 'prometheus/snmp_exporter'
 - name: Lint
-uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
 with:
 args: --verbose
 # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-version: v1.55.2
+version: v1.56.2
 fuzzing:
 uses: ./.github/workflows/fuzzing.yml
 if: github.event_name == 'pull_request'

View file

@@ -0,0 +1,52 @@
---
name: Push README to Docker Hub
on:
push:
paths:
- "README.md"
- ".github/workflows/container_description.yml"
branches: [ main, master ]
permissions:
contents: read
jobs:
PushDockerHubReadme:
runs-on: ubuntu-latest
name: Push README to Docker Hub
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
env:
DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: dockerhub
short_description: ${{ env.DOCKER_REPO_NAME }}
readme_file: 'README.md'
PushQuayIoReadme:
runs-on: ubuntu-latest
name: Push README to quay.io
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to quay.io
uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
env:
DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: quay
readme_file: 'README.md'

View file

@@ -48,24 +48,24 @@ linters-settings:
 rules:
 main:
 deny:
 - pkg: "sync/atomic"
 desc: "Use go.uber.org/atomic instead of sync/atomic"
 - pkg: "github.com/stretchr/testify/assert"
 desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
 - pkg: "github.com/go-kit/kit/log"
 desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
 - pkg: "io/ioutil"
 desc: "Use corresponding 'os' or 'io' functions instead."
 - pkg: "regexp"
 desc: "Use github.com/grafana/regexp instead of regexp"
 - pkg: "github.com/pkg/errors"
 desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
 - pkg: "gzip"
 desc: "Use github.com/klauspost/compress instead of gzip"
 - pkg: "zlib"
 desc: "Use github.com/klauspost/compress instead of zlib"
 - pkg: "golang.org/x/exp/slices"
 desc: "Use 'slices' instead."
 errcheck:
 exclude-functions:
 # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
@@ -135,4 +135,3 @@ linters-settings:
 - require-error
 - suite-dont-use-pkg
 - suite-extra-assert-call

View file

@@ -1,5 +1,7 @@
 ---
 extends: default
+ignore: |
+  ui/react-app/node_modules
 rules:
 braces:

View file

@@ -1,5 +1,9 @@
 # Changelog
+## unreleased
+* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
 ## 2.51.1 / 2024-03-27
 Bugfix release.

View file

@@ -90,7 +90,7 @@ can modify the `./promql/parser/generated_parser.y.go` manually.
 ```golang
 // As of writing this was somewhere around line 600.
 var (
-yyDebug = 0 // This can be be a number 0 -> 5.
+yyDebug = 0 // This can be a number 0 -> 5.
 yyErrorVerbose = false // This can be set to true.
 )

View file

@@ -1,7 +1,12 @@
 # Maintainers
-Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:
+General maintainers:
+* Bryan Boreham (bjboreham@gmail.com / @bboreham)
+* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
+* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
+* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
+Maintainers for specific parts of the codebase:
 * `cmd`
 * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
 * `discovery`
@@ -12,6 +17,7 @@ Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison
 George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
 * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+* `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
@@ -19,7 +25,6 @@ George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
 * `Makefile` and related build configuration: Simon Pasquier (<pasquier.simon@gmail.com> / @simonpasquier), Ben Kochie (<superq@gmail.com> / @SuperQ)
 For the sake of brevity, not all subtrees are explicitly listed. Due to the
 size of this repository, the natural changes in focus of maintainers over time,
 and nuances of where particular features live, this list will always be

View file

@@ -82,7 +82,7 @@ assets-tarball: assets
 .PHONY: parser
 parser:
 @echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell command -v goyacc > /dev/null))
+ifeq (, $(shell command -v goyacc 2> /dev/null))
 @echo "goyacc not installed so skipping"
 @echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
 else

View file

@@ -49,7 +49,7 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
 GOTEST_DIR := test-results
 GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.55.2
+GOLANGCI_LINT_VERSION ?= v1.56.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -182,7 +182,7 @@ endif
 .PHONY: common-yamllint
 common-yamllint:
 @echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
 @echo "yamllint not installed so skipping"
 else
 yamllint .
@@ -208,6 +208,10 @@ common-tarball: promu
 @echo ">> building release tarball"
 $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:

View file

@@ -960,8 +960,8 @@ func main() {
 func() error {
 // Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
 select {
-case <-term:
-level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
+case sig := <-term:
+level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String())
 reloadReady.Close()
 case <-webHandler.Quit():
 level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")

View file

@@ -12,4 +12,4 @@ tests:
 eval_time: 1m
 exp_samples:
 - value: 1
 labels: test

View file

@@ -33,6 +33,7 @@ import (
 "github.com/alecthomas/units"
 "github.com/go-kit/log"
+"go.uber.org/atomic"
 "github.com/prometheus/prometheus/model/labels"
 "github.com/prometheus/prometheus/promql/parser"
@@ -149,8 +150,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 }
 func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
-var mu sync.Mutex
-var total uint64
+var total atomic.Uint64
 for i := 0; i < scrapeCount; i += 100 {
 var wg sync.WaitGroup
@@ -165,22 +165,21 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
 wg.Add(1)
 go func() {
+defer wg.Done()
 n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
 if err != nil {
 // exitWithError(err)
 fmt.Println(" err", err)
 }
-mu.Lock()
-total += n
-mu.Unlock()
-wg.Done()
+total.Add(n)
 }()
 }
 wg.Wait()
 }
 fmt.Println("ingestion completed")
-return total, nil
+return total.Load(), nil
 }
 func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) {

View file

@@ -175,13 +175,18 @@ type testGroup struct {
 }
 // test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) []error {
+func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
 // Setup testing suite.
-suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts)
+suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
 if err != nil {
 return []error{err}
 }
-defer suite.Close()
+defer func() {
+err := suite.Close()
+if err != nil {
+outErr = append(outErr, err)
+}
+}()
 suite.SubqueryInterval = evalInterval
 // Load the rule files.

View file

@@ -1840,7 +1840,7 @@ var expectedErrors = []struct {
 },
 {
 filename: "azure_authentication_method.bad.yml",
-errMsg: "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"",
+errMsg: "unknown authentication_type \"invalid\". Supported types are \"OAuth\", \"ManagedIdentity\" or \"SDK\"",
 },
 {
 filename: "azure_bearertoken_basicauth.bad.yml",

View file

@@ -65,6 +65,7 @@ const (
 azureLabelMachineSize = azureLabel + "machine_size"
 authMethodOAuth = "OAuth"
+authMethodSDK = "SDK"
 authMethodManagedIdentity = "ManagedIdentity"
 )
@@ -164,8 +165,8 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 }
 }
-if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity {
-return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity)
+if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity && c.AuthenticationMethod != authMethodSDK {
+return fmt.Errorf("unknown authentication_type %q. Supported types are %q, %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity, authMethodSDK)
 }
 return c.HTTPClientConfig.Validate()
@@ -212,6 +213,14 @@ func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.Discoverer
 return d, nil
 }
type client interface {
getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error)
getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error)
getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error)
getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error)
getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error)
}
 // azureClient represents multiple Azure Resource Manager providers.
 type azureClient struct {
 nic *armnetwork.InterfacesClient
@@ -221,14 +230,17 @@ type azureClient struct {
 logger log.Logger
 }
+var _ client = &azureClient{}
 // createAzureClient is a helper function for creating an Azure compute client to ARM.
-func createAzureClient(cfg SDConfig) (azureClient, error) {
+func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
 cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
 var c azureClient
+c.logger = logger
 telemetry := policy.TelemetryOptions{
 ApplicationID: userAgent,
@@ -239,12 +251,12 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
 Telemetry: telemetry,
 })
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
 client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
 options := &arm.ClientOptions{
 ClientOptions: policy.ClientOptions{
@@ -256,25 +268,25 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
 c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
 c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
 c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
 c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
 if err != nil {
-return azureClient{}, err
+return &azureClient{}, err
 }
-return c, nil
+return &c, nil
 }
 func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
@@ -294,6 +306,16 @@ func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azco
 return nil, err
 }
 credential = azcore.TokenCredential(secretCredential)
case authMethodSDK:
options := &azidentity.DefaultAzureCredentialOptions{ClientOptions: policyClientOptions}
if len(cfg.TenantID) != 0 {
options.TenantID = cfg.TenantID
}
sdkCredential, err := azidentity.NewDefaultAzureCredential(options)
if err != nil {
return nil, err
}
credential = azcore.TokenCredential(sdkCredential)
 }
 return credential, nil
 }
@@ -330,12 +352,11 @@ func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, erro
 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
-client, err := createAzureClient(*d.cfg)
+client, err := createAzureClient(*d.cfg, d.logger)
 if err != nil {
 d.metrics.failuresCount.Inc()
 return nil, fmt.Errorf("could not create Azure client: %w", err)
 }
-client.logger = d.logger
 machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
 if err != nil {
@@ -374,96 +395,8 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 for _, vm := range machines {
 go func(vm virtualMachine) {
 defer wg.Done()
+labelSet, err := d.vmToLabelSet(ctx, client, vm)
+ch <- target{labelSet: labelSet, err: err}
-r, err := newAzureResourceFromID(vm.ID, d.logger)
-if err != nil {
ch <- target{labelSet: nil, err: err}
return
}
labels := model.LabelSet{
azureLabelSubscriptionID: model.LabelValue(d.cfg.SubscriptionID),
azureLabelTenantID: model.LabelValue(d.cfg.TenantID),
azureLabelMachineID: model.LabelValue(vm.ID),
azureLabelMachineName: model.LabelValue(vm.Name),
azureLabelMachineComputerName: model.LabelValue(vm.ComputerName),
azureLabelMachineOSType: model.LabelValue(vm.OsType),
azureLabelMachineLocation: model.LabelValue(vm.Location),
azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
azureLabelMachineSize: model.LabelValue(vm.Size),
}
if vm.ScaleSet != "" {
labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
}
for k, v := range vm.Tags {
name := strutil.SanitizeLabelName(k)
labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
}
// Get the IP address information via separate call to the network provider.
for _, nicID := range vm.NetworkInterfaces {
var networkInterface *armnetwork.Interface
if v, ok := d.getFromCache(nicID); ok {
networkInterface = v
d.metrics.cacheHitCount.Add(1)
} else {
if vm.ScaleSet == "" {
networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
} else {
networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
}
if err != nil {
if errors.Is(err, errorNotFound) {
level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
} else {
ch <- target{labelSet: nil, err: err}
}
// Get out of this routine because we cannot continue without a network interface.
return
}
// Continue processing with the network interface
d.addToCache(nicID, networkInterface)
}
if networkInterface.Properties == nil {
continue
}
// Unfortunately Azure does not return information on whether a VM is deallocated.
// This information is available via another API call however the Go SDK does not
// yet support this. On deallocated machines, this value happens to be nil so it
// is a cheap and easy way to determine if a machine is allocated or not.
if networkInterface.Properties.Primary == nil {
level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
return
}
if *networkInterface.Properties.Primary {
for _, ip := range networkInterface.Properties.IPConfigurations {
// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
// therefore we need to validate that both are not nil.
if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
}
if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(address)
ch <- target{labelSet: labels, err: nil}
return
}
// If we made it here, we don't have a private IP which should be impossible.
// Return an empty target and error to ensure an all or nothing situation.
err = fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
ch <- target{labelSet: nil, err: err}
return
}
}
}
 }(vm)
 }
@@ -484,6 +417,95 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 return []*targetgroup.Group{&tg}, nil
 }
func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
r, err := newAzureResourceFromID(vm.ID, d.logger)
if err != nil {
return nil, err
}
labels := model.LabelSet{
azureLabelSubscriptionID: model.LabelValue(d.cfg.SubscriptionID),
azureLabelTenantID: model.LabelValue(d.cfg.TenantID),
azureLabelMachineID: model.LabelValue(vm.ID),
azureLabelMachineName: model.LabelValue(vm.Name),
azureLabelMachineComputerName: model.LabelValue(vm.ComputerName),
azureLabelMachineOSType: model.LabelValue(vm.OsType),
azureLabelMachineLocation: model.LabelValue(vm.Location),
azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
azureLabelMachineSize: model.LabelValue(vm.Size),
}
if vm.ScaleSet != "" {
labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
}
for k, v := range vm.Tags {
name := strutil.SanitizeLabelName(k)
labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
}
// Get the IP address information via separate call to the network provider.
for _, nicID := range vm.NetworkInterfaces {
var networkInterface *armnetwork.Interface
if v, ok := d.getFromCache(nicID); ok {
networkInterface = v
d.metrics.cacheHitCount.Add(1)
} else {
if vm.ScaleSet == "" {
networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
} else {
networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
}
if err != nil {
if errors.Is(err, errorNotFound) {
level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
} else {
return nil, err
}
// Get out of this routine because we cannot continue without a network interface.
return nil, nil
}
// Continue processing with the network interface
d.addToCache(nicID, networkInterface)
}
if networkInterface.Properties == nil {
continue
}
// Unfortunately Azure does not return information on whether a VM is deallocated.
// This information is available via another API call however the Go SDK does not
// yet support this. On deallocated machines, this value happens to be nil so it
// is a cheap and easy way to determine if a machine is allocated or not.
if networkInterface.Properties.Primary == nil {
level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
return nil, nil
}
if *networkInterface.Properties.Primary {
for _, ip := range networkInterface.Properties.IPConfigurations {
// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
// therefore we need to validate that both are not nil.
if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
}
if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
labels[model.AddressLabel] = model.LabelValue(address)
return labels, nil
}
// If we made it here, we don't have a private IP which should be impossible.
// Return an empty target and error to ensure an all or nothing situation.
return nil, fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
}
}
}
// TODO: Should we say something at this point?
return nil, nil
}
 func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
 var vms []virtualMachine
 if len(resourceGroup) == 0 {

View file

@@ -14,16 +14,24 @@
 package azure
 import (
+"context"
+"fmt"
 "testing"
 "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
 "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+cache "github.com/Code-Hex/go-generics-cache"
+"github.com/Code-Hex/go-generics-cache/policy/lru"
+"github.com/go-kit/log"
 "github.com/stretchr/testify/require"
 "go.uber.org/goleak"
 )
 func TestMain(m *testing.M) {
-goleak.VerifyTestMain(m)
+goleak.VerifyTestMain(m,
+goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),
+)
 }
func TestMapFromVMWithEmptyTags(t *testing.T) { func TestMapFromVMWithEmptyTags(t *testing.T) {
@@ -79,6 +87,91 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
 require.Equal(t, expectedVM, actualVM)
 }
func TestVMToLabelSet(t *testing.T) {
id := "/subscriptions/00000000-0000-0000-0000-000000000000/test"
name := "name"
size := "size"
vmSize := armcompute.VirtualMachineSizeTypes(size)
osType := armcompute.OperatingSystemTypesLinux
vmType := "type"
location := "westeurope"
computerName := "computer_name"
networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1"
ipAddress := "10.20.30.40"
primary := true
networkProfile := armcompute.NetworkProfile{
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
{
ID: &networkID,
Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
},
},
}
properties := &armcompute.VirtualMachineProperties{
OSProfile: &armcompute.OSProfile{
ComputerName: &computerName,
},
StorageProfile: &armcompute.StorageProfile{
OSDisk: &armcompute.OSDisk{
OSType: &osType,
},
},
NetworkProfile: &networkProfile,
HardwareProfile: &armcompute.HardwareProfile{
VMSize: &vmSize,
},
}
testVM := armcompute.VirtualMachine{
ID: &id,
Name: &name,
Type: &vmType,
Location: &location,
Tags: nil,
Properties: properties,
}
expectedVM := virtualMachine{
ID: id,
Name: name,
ComputerName: computerName,
Type: vmType,
Location: location,
OsType: "Linux",
Tags: map[string]*string{},
NetworkInterfaces: []string{networkID},
Size: size,
}
actualVM := mapFromVM(testVM)
require.Equal(t, expectedVM, actualVM)
cfg := DefaultSDConfig
d := &Discovery{
cfg: &cfg,
logger: log.NewNopLogger(),
cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
}
network := armnetwork.Interface{
Name: &networkID,
Properties: &armnetwork.InterfacePropertiesFormat{
Primary: &primary,
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
{Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
PrivateIPAddress: &ipAddress,
}},
},
},
}
client := &mockAzureClient{
networkInterface: &network,
}
labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
require.NoError(t, err)
require.Len(t, labelSet, 11)
}
 func TestMapFromVMWithEmptyOSType(t *testing.T) {
 id := "test"
 name := "name"
@@ -381,3 +474,35 @@ func TestNewAzureResourceFromID(t *testing.T) {
 require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
 }
 }
type mockAzureClient struct {
networkInterface *armnetwork.Interface
}
var _ client = &mockAzureClient{}
func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
return nil, nil
}
func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
return nil, nil
}
func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
return nil, nil
}
func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
if networkInterfaceID == "" {
return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty")
}
return m.networkInterface, nil
}
func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
if scaleSetName == "" {
return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty")
}
return m.networkInterface, nil
}

View file

@@ -208,7 +208,6 @@ func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group)
 select {
 case <-timeout:
 t.Fatalf("Expected update but got none")
-return
 case <-time.After(defaultWait / 10):
 if ref.Equal(t.lastReceive()) {
 // No update received.

View file

@@ -733,7 +733,6 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 t.Helper()
 if _, ok := tSets[poolKey]; !ok {
 t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
-return
 }
 match := false

View file

@@ -733,7 +733,6 @@ func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group,
 t.Helper()
 if _, ok := tGroups[key]; !ok {
 t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
-return
 }
 match := false
 var mergedTargets string

View file

@@ -600,8 +600,10 @@ See below for the configuration options for Azure discovery:
 # The Azure environment.
 [ environment: <string> | default = AzurePublicCloud ]
-# The authentication method, either OAuth or ManagedIdentity.
+# The authentication method, either OAuth, ManagedIdentity or SDK.
 # See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
+# SDK authentication method uses environment variables by default.
+# See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication
 [ authentication_method: <string> | default = OAuth]
 # The subscription ID. Always required.
 subscription_id: <string>
@@ -3619,6 +3621,11 @@ azuread:
 [ client_secret: <string> ]
 [ tenant_id: <string> ] ]
+# Azure SDK auth.
+# See https://learn.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication
+[ sdk:
+  [ tenant_id: <string> ] ]
 # Configures the remote write request's TLS settings.
 tls_config:
 [ <tls_config> ]
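As an illustration of the new option documented above, a minimal scrape configuration using SDK authentication might look like the sketch below; the job name, subscription ID and port are placeholder values, not part of this change:

```yaml
scrape_configs:
  - job_name: "azure-vms"  # placeholder job name
    azure_sd_configs:
      - subscription_id: "00000000-0000-0000-0000-000000000000"  # placeholder subscription ID
        authentication_method: SDK  # credentials resolved by the Azure SDK default chain (env vars, managed identity, Azure CLI)
        port: 9100  # placeholder port to scrape on discovered VMs
```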

View file

@@ -9,7 +9,7 @@ require (
 github.com/golang/snappy v0.0.4
 github.com/influxdata/influxdb v1.11.5
 github.com/prometheus/client_golang v1.19.0
-github.com/prometheus/common v0.49.0
+github.com/prometheus/common v0.50.0
 github.com/prometheus/prometheus v0.50.1
 github.com/stretchr/testify v1.9.0
 )
@@ -58,17 +58,17 @@ require (
 go.opentelemetry.io/otel/trace v1.22.0 // indirect
 go.uber.org/atomic v1.11.0 // indirect
 go.uber.org/multierr v1.11.0 // indirect
-golang.org/x/crypto v0.19.0 // indirect
+golang.org/x/crypto v0.21.0 // indirect
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
-golang.org/x/net v0.21.0 // indirect
+golang.org/x/net v0.22.0 // indirect
-golang.org/x/oauth2 v0.17.0 // indirect
+golang.org/x/oauth2 v0.18.0 // indirect
-golang.org/x/sys v0.17.0 // indirect
+golang.org/x/sys v0.18.0 // indirect
 golang.org/x/text v0.14.0 // indirect
 golang.org/x/time v0.5.0 // indirect
 google.golang.org/appengine v1.6.8 // indirect
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect
 google.golang.org/grpc v1.61.0 // indirect
-google.golang.org/protobuf v1.32.0 // indirect
+google.golang.org/protobuf v1.33.0 // indirect
 gopkg.in/yaml.v2 v2.4.0 // indirect
 gopkg.in/yaml.v3 v3.0.1 // indirect
 k8s.io/apimachinery v0.28.6 // indirect

View file

@@ -269,8 +269,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.49.0 h1:ToNTdK4zSnPVJmh698mGFkDor9wBI/iGaJy5dbH1EgI=
+github.com/prometheus/common v0.50.0 h1:YSZE6aa9+luNa2da6/Tik0q0A5AbR+U003TItK57CPQ=
-github.com/prometheus/common v0.49.0/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE=
+github.com/prometheus/common v0.50.0/go.mod h1:wHFBCEVWVmHMUpg7pYcOm2QUR/ocQdYSJVQJKnHc3xQ=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -332,8 +332,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
 golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -356,12 +356,12 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
-golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
+golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -389,12 +389,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -436,8 +436,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

go.mod
View file

@@ -41,7 +41,7 @@ require (
 github.com/json-iterator/go v1.1.12
 github.com/klauspost/compress v1.17.7
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
-github.com/linode/linodego v1.29.0
+github.com/linode/linodego v1.30.0
 github.com/miekg/dns v1.1.58
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@@ -74,7 +74,6 @@ require (
 go.uber.org/automaxprocs v1.5.3
 go.uber.org/goleak v1.3.0
 go.uber.org/multierr v1.11.0
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
 golang.org/x/net v0.22.0
 golang.org/x/oauth2 v0.18.0
 golang.org/x/sync v0.6.0
@@ -84,12 +83,12 @@ require (
 google.golang.org/api v0.168.0
 google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8
 google.golang.org/grpc v1.62.1
-google.golang.org/protobuf v1.32.0
+google.golang.org/protobuf v1.33.0
 gopkg.in/yaml.v2 v2.4.0
 gopkg.in/yaml.v3 v3.0.1
-k8s.io/api v0.29.2
+k8s.io/api v0.29.3
-k8s.io/apimachinery v0.29.2
+k8s.io/apimachinery v0.29.3
-k8s.io/client-go v0.29.2
+k8s.io/client-go v0.29.3
 k8s.io/klog v1.0.0
 k8s.io/klog/v2 v2.120.1
 )
@@ -134,7 +133,7 @@ require (
 github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
 github.com/golang/glog v1.2.0 // indirect
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-github.com/golang/protobuf v1.5.3 // indirect
+github.com/golang/protobuf v1.5.4 // indirect
 github.com/google/gnostic-models v0.6.8 // indirect
 github.com/google/go-querystring v1.1.0 // indirect
 github.com/google/gofuzz v1.2.0 // indirect
@@ -186,6 +185,7 @@ require (
 go.opentelemetry.io/otel/metric v1.24.0 // indirect
 go.opentelemetry.io/proto/otlp v1.1.0 // indirect
 golang.org/x/crypto v0.21.0 // indirect
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
 golang.org/x/mod v0.16.0 // indirect
 golang.org/x/term v0.18.0 // indirect
 golang.org/x/text v0.14.0 // indirect

go.sum
View file

@@ -280,8 +280,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -424,8 +424,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
 github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
 github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
 github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
-github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc=
+github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
-github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
+github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -471,8 +471,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4=
+github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k=
-github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk=
+github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -1118,8 +1118,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1161,12 +1161,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=

View file

@ -39,7 +39,8 @@ type Label struct {
} }
func (ls Labels) String() string { func (ls Labels) String() string {
var b bytes.Buffer var bytea [1024]byte // On stack to avoid memory allocation while building the output.
b := bytes.NewBuffer(bytea[:0])
b.WriteByte('{') b.WriteByte('{')
i := 0 i := 0
@ -50,7 +51,7 @@ func (ls Labels) String() string {
} }
b.WriteString(l.Name) b.WriteString(l.Name)
b.WriteByte('=') b.WriteByte('=')
b.WriteString(strconv.Quote(l.Value)) b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value))
i++ i++
}) })
b.WriteByte('}') b.WriteByte('}')
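The hunk above replaces strconv.Quote, which allocates a string per value, with strconv.AppendQuote into the buffer's spare capacity, backed by a fixed array. A minimal standalone sketch of that pattern (assumes Go 1.21+ for bytes.Buffer.AvailableBuffer; quotePairs is an illustrative name, not from the diff):

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// quotePairs renders name="value" pairs without a per-value string allocation.
func quotePairs(pairs [][2]string) string {
	var backing [1024]byte            // fixed array backing the buffer, as in the change above
	b := bytes.NewBuffer(backing[:0]) // empty buffer that starts with that capacity
	for _, p := range pairs {
		b.WriteString(p[0])
		b.WriteByte('=')
		// AppendQuote writes the quoted value into the buffer's unused capacity,
		// so Write only extends the length instead of allocating a new string.
		b.Write(strconv.AppendQuote(b.AvailableBuffer(), p[1]))
		b.WriteByte(' ')
	}
	return b.String()
}

func main() {
	fmt.Println(quotePairs([][2]string{{"job", "node"}, {"instance", "localhost:9090"}}))
}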

View file

@ -363,13 +363,11 @@ func Compare(a, b Labels) int {
// Now we know that there is some difference before the end of a and b. // Now we know that there is some difference before the end of a and b.
// Go back through the fields and find which field that difference is in. // Go back through the fields and find which field that difference is in.
firstCharDifferent := i firstCharDifferent, i := i, 0
for i = 0; ; { size, nextI := decodeSize(a.data, i)
size, nextI := decodeSize(a.data, i) for nextI+size <= firstCharDifferent {
if nextI+size > firstCharDifferent {
break
}
i = nextI + size i = nextI + size
size, nextI = decodeSize(a.data, i)
} }
// Difference is inside this entry. // Difference is inside this entry.
aStr, _ := decodeString(a.data, i) aStr, _ := decodeString(a.data, i)

View file

@ -43,6 +43,13 @@ func TestLabels_String(t *testing.T) {
} }
} }
func BenchmarkString(b *testing.B) {
ls := New(benchmarkLabels...)
for i := 0; i < b.N; i++ {
_ = ls.String()
}
}
func TestLabels_MatchLabels(t *testing.T) { func TestLabels_MatchLabels(t *testing.T) {
labels := FromStrings( labels := FromStrings(
"__name__", "ALERTS", "__name__", "ALERTS",
@ -529,6 +536,16 @@ var comparisonBenchmarkScenarios = []struct {
FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"), FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrz"),
FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"), FromStrings("aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii", "jjj", "kkk", "lll", "mmm", "nnn", "ooo", "ppp", "qqq", "rrr"),
}, },
{
"real long equal",
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
},
{
"real long different end",
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "d3ec90b2-4975-4607-b45d-b9ad64bb417e"),
FromStrings("__name__", "kube_pod_container_status_last_terminated_exitcode", "cluster", "prod-af-north-0", " container", "prometheus", "instance", "kube-state-metrics-0:kube-state-metrics:ksm", "job", "kube-state-metrics/kube-state-metrics", " namespace", "observability-prometheus", "pod", "observability-prometheus-0", "uid", "deadbeef-0000-1111-2222-b9ad64bb417e"),
},
} }
func BenchmarkLabels_Equals(b *testing.B) { func BenchmarkLabels_Equals(b *testing.B) {
@ -789,24 +806,24 @@ func BenchmarkLabels_Hash(b *testing.B) {
} }
} }
func BenchmarkBuilder(b *testing.B) { var benchmarkLabels = []Label{
m := []Label{ {"job", "node"},
{"job", "node"}, {"instance", "123.123.1.211:9090"},
{"instance", "123.123.1.211:9090"}, {"path", "/api/v1/namespaces/<namespace>/deployments/<name>"},
{"path", "/api/v1/namespaces/<namespace>/deployments/<name>"}, {"method", "GET"},
{"method", "GET"}, {"namespace", "system"},
{"namespace", "system"}, {"status", "500"},
{"status", "500"}, {"prometheus", "prometheus-core-1"},
{"prometheus", "prometheus-core-1"}, {"datacenter", "eu-west-1"},
{"datacenter", "eu-west-1"}, {"pod_name", "abcdef-99999-defee"},
{"pod_name", "abcdef-99999-defee"}, }
}
func BenchmarkBuilder(b *testing.B) {
var l Labels var l Labels
builder := NewBuilder(EmptyLabels()) builder := NewBuilder(EmptyLabels())
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
builder.Reset(EmptyLabels()) builder.Reset(EmptyLabels())
for _, l := range m { for _, l := range benchmarkLabels {
builder.Set(l.Name, l.Value) builder.Set(l.Name, l.Value)
} }
l = builder.Labels() l = builder.Labels()
@ -815,18 +832,7 @@ func BenchmarkBuilder(b *testing.B) {
} }
func BenchmarkLabels_Copy(b *testing.B) { func BenchmarkLabels_Copy(b *testing.B) {
m := map[string]string{ l := New(benchmarkLabels...)
"job": "node",
"instance": "123.123.1.211:9090",
"path": "/api/v1/namespaces/<namespace>/deployments/<name>",
"method": "GET",
"namespace": "system",
"status": "500",
"prometheus": "prometheus-core-1",
"datacenter": "eu-west-1",
"pod_name": "abcdef-99999-defee",
}
l := FromMap(m)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
l = l.Copy() l = l.Copy()

View file

@ -118,3 +118,30 @@ func (m *Matcher) GetRegexString() string {
} }
return m.re.GetRegexString() return m.re.GetRegexString()
} }
// SetMatches returns a set of equality matchers for the current regex matcher, if possible.
// For example, the regexp `a(b|f)` will return "ab" and "af".
// It returns nil if the regexp cannot be replaced by only equality matchers.
func (m *Matcher) SetMatches() []string {
if m.re == nil {
return nil
}
return m.re.SetMatches()
}
// Prefix returns the required prefix of the value to match, if possible.
// It will be empty if it's an equality matcher or if the prefix can't be determined.
func (m *Matcher) Prefix() string {
if m.re == nil {
return ""
}
return m.re.prefix
}
// IsRegexOptimized returns whether the regex is optimized.
func (m *Matcher) IsRegexOptimized() bool {
if m.re == nil {
return false
}
return m.re.IsOptimized()
}
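These accessors let callers ask whether a regex matcher collapses to a set of literal values or to a literal prefix, which is what the surrounding optimizations produce. A hedged usage sketch (import path as in this repository; the result comments follow from the code added in this change):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// A pure alternation of literals collapses to a set of equality matches.
	m := labels.MustNewMatcher(labels.MatchRegexp, "job", "api|web")
	fmt.Println(m.SetMatches())       // [api web]
	fmt.Println(m.IsRegexOptimized()) // true

	// A literal prefix followed by a wildcard exposes the prefix instead.
	p := labels.MustNewMatcher(labels.MatchRegexp, "job", "api_.+")
	fmt.Println(p.Prefix())     // api_
	fmt.Println(p.SetMatches()) // [] (cannot be reduced to equality matches)
}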

View file

@ -14,13 +14,14 @@
package labels package labels
import ( import (
"fmt"
"testing" "testing"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher { func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher {
m, err := NewMatcher(mType, "", value) m, err := NewMatcher(mType, "test_label_name", value)
require.NoError(t, err) require.NoError(t, err)
return m return m
} }
@ -81,6 +82,21 @@ func TestMatcher(t *testing.T) {
value: "foo-bar", value: "foo-bar",
match: false, match: false,
}, },
{
matcher: mustNewMatcher(t, MatchRegexp, "$*bar"),
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "bar^+"),
value: "foo-bar",
match: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "$+bar"),
value: "foo-bar",
match: false,
},
} }
for _, test := range tests { for _, test := range tests {
@ -118,6 +134,82 @@ func TestInverse(t *testing.T) {
} }
} }
func TestPrefix(t *testing.T) {
for i, tc := range []struct {
matcher *Matcher
prefix string
}{
{
matcher: mustNewMatcher(t, MatchEqual, "abc"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchNotEqual, "abc"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abcd|abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchNotRegexp, "abcd|abc.+"),
prefix: "abc",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc(def|ghj)|ab|a."),
prefix: "a",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "foo.+bar|foo.*baz"),
prefix: "foo",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc|.*"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc|def"),
prefix: "",
},
{
matcher: mustNewMatcher(t, MatchRegexp, ".+def"),
prefix: "",
},
} {
t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) {
require.Equal(t, tc.prefix, tc.matcher.Prefix())
})
}
}
func TestIsRegexOptimized(t *testing.T) {
for i, tc := range []struct {
matcher *Matcher
isRegexOptimized bool
}{
{
matcher: mustNewMatcher(t, MatchEqual, "abc"),
isRegexOptimized: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "."),
isRegexOptimized: false,
},
{
matcher: mustNewMatcher(t, MatchRegexp, "abc.+"),
isRegexOptimized: true,
},
} {
t.Run(fmt.Sprintf("%d: %s", i, tc.matcher), func(t *testing.T) {
require.Equal(t, tc.isRegexOptimized, tc.matcher.IsRegexOptimized())
})
}
}
func BenchmarkMatchType_String(b *testing.B) { func BenchmarkMatchType_String(b *testing.B) {
for i := 0; i <= b.N; i++ { for i := 0; i <= b.N; i++ {
_ = MatchType(i % int(MatchNotRegexp+1)).String() _ = MatchType(i % int(MatchNotRegexp+1)).String()

View file

@ -14,73 +14,348 @@
package labels package labels
import ( import (
"slices"
"strings" "strings"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/grafana/regexp/syntax" "github.com/grafana/regexp/syntax"
) )
type FastRegexMatcher struct { const (
re *regexp.Regexp maxSetMatches = 256
prefix string
suffix string
contains string
// shortcut for literals // The minimum number of alternate values a regex should have to trigger
literal bool // the optimization done by optimizeEqualStringMatchers() and so use a map
value string // to match values instead of iterating over a list. This value has
// been computed running BenchmarkOptimizeEqualStringMatchers.
minEqualMultiStringMatcherMapThreshold = 16
)
type FastRegexMatcher struct {
// Under some conditions, re is nil because the expression is never parsed.
// We store the original string to be able to return it in GetRegexString().
reString string
re *regexp.Regexp
setMatches []string
stringMatcher StringMatcher
prefix string
suffix string
contains string
// matchString is the "compiled" function to run by MatchString().
matchString func(string) bool
} }
func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
if isLiteral(v) {
return &FastRegexMatcher{literal: true, value: v}, nil
}
re, err := regexp.Compile("^(?:" + v + ")$")
if err != nil {
return nil, err
}
parsed, err := syntax.Parse(v, syntax.Perl)
if err != nil {
return nil, err
}
m := &FastRegexMatcher{ m := &FastRegexMatcher{
re: re, reString: v,
} }
if parsed.Op == syntax.OpConcat { m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v)
m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) if m.stringMatcher != nil {
// If we already have a string matcher, we don't need to parse the regex
// or compile the matchString function. This also avoids the behavior in
// compileMatchStringFunction where it prefers to use setMatches when
// available, even if the string matcher is faster.
m.matchString = m.stringMatcher.Matches
} else {
parsed, err := syntax.Parse(v, syntax.Perl)
if err != nil {
return nil, err
}
// Simplify the syntax tree to run faster.
parsed = parsed.Simplify()
m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$")
if err != nil {
return nil, err
}
if parsed.Op == syntax.OpConcat {
m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
}
if matches, caseSensitive := findSetMatches(parsed); caseSensitive {
m.setMatches = matches
}
m.stringMatcher = stringMatcherFromRegexp(parsed)
m.matchString = m.compileMatchStringFunction()
} }
return m, nil return m, nil
} }
// compileMatchStringFunction returns the function to be run by MatchString().
func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool {
// If the only optimization available is the string matcher, then we can just run it.
if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && m.contains == "" && m.stringMatcher != nil {
return m.stringMatcher.Matches
}
return func(s string) bool {
if len(m.setMatches) != 0 {
for _, match := range m.setMatches {
if match == s {
return true
}
}
return false
}
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
return false
}
if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
return false
}
if m.contains != "" && !strings.Contains(s, m.contains) {
return false
}
if m.stringMatcher != nil {
return m.stringMatcher.Matches(s)
}
return m.re.MatchString(s)
}
}
// IsOptimized returns true if any fast-path optimization is applied to the
// regex matcher.
func (m *FastRegexMatcher) IsOptimized() bool {
return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || m.contains != ""
}
// findSetMatches extract equality matches from a regexp.
// Returns nil if we can't replace the regexp by only equality matchers or the regexp contains
// a mix of case sensitive and case insensitive matchers.
func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) {
clearBeginEndText(re)
return findSetMatchesInternal(re, "")
}
func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) {
switch re.Op {
case syntax.OpBeginText:
// Correctly handling the begin text operator inside a regex is tricky,
// so in this case we fall back to the regex engine.
return nil, false
case syntax.OpEndText:
// Correctly handling the end text operator inside a regex is tricky,
// so in this case we fall back to the regex engine.
return nil, false
case syntax.OpLiteral:
return []string{base + string(re.Rune)}, isCaseSensitive(re)
case syntax.OpEmptyMatch:
if base != "" {
return []string{base}, isCaseSensitive(re)
}
case syntax.OpAlternate:
return findSetMatchesFromAlternate(re, base)
case syntax.OpCapture:
clearCapture(re)
return findSetMatchesInternal(re, base)
case syntax.OpConcat:
return findSetMatchesFromConcat(re, base)
case syntax.OpCharClass:
if len(re.Rune)%2 != 0 {
return nil, false
}
var matches []string
var totalSet int
for i := 0; i+1 < len(re.Rune); i += 2 {
totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1
}
// Limit the total number of characters that can be used to create matches.
// In some cases, like negation [^0-9], a lot of possibilities exist, and that
// can create thousands of possible matches, at which point we're better off using the regexp engine.
if totalSet > maxSetMatches {
return nil, false
}
for i := 0; i+1 < len(re.Rune); i += 2 {
lo, hi := re.Rune[i], re.Rune[i+1]
for c := lo; c <= hi; c++ {
matches = append(matches, base+string(c))
}
}
return matches, isCaseSensitive(re)
default:
return nil, false
}
return nil, false
}
func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
if len(re.Sub) == 0 {
return nil, false
}
clearCapture(re.Sub...)
matches = []string{base}
for i := 0; i < len(re.Sub); i++ {
var newMatches []string
for j, b := range matches {
m, caseSensitive := findSetMatchesInternal(re.Sub[i], b)
if m == nil {
return nil, false
}
if tooManyMatches(newMatches, m...) {
return nil, false
}
// All matches must have the same case sensitivity. If it's the first set of matches
// returned, we store its sensitivity as the expected case, and then we'll check all
// other ones.
if i == 0 && j == 0 {
matchesCaseSensitive = caseSensitive
}
if matchesCaseSensitive != caseSensitive {
return nil, false
}
newMatches = append(newMatches, m...)
}
matches = newMatches
}
return matches, matchesCaseSensitive
}
func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
for i, sub := range re.Sub {
found, caseSensitive := findSetMatchesInternal(sub, base)
if found == nil {
return nil, false
}
if tooManyMatches(matches, found...) {
return nil, false
}
// All matches must have the same case sensitivity. If it's the first set of matches
// returned, we store its sensitivity as the expected case, and then we'll check all
// other ones.
if i == 0 {
matchesCaseSensitive = caseSensitive
}
if matchesCaseSensitive != caseSensitive {
return nil, false
}
matches = append(matches, found...)
}
return matches, matchesCaseSensitive
}
// clearCapture removes capture operations, as they are not used for matching.
func clearCapture(regs ...*syntax.Regexp) {
for _, r := range regs {
// Iterate on the regexp because capture groups could be nested.
for r.Op == syntax.OpCapture {
*r = *r.Sub[0]
}
}
}
// clearBeginEndText removes the begin and end text from the regexp. Prometheus regexps are anchored to the beginning and end of the string.
func clearBeginEndText(re *syntax.Regexp) {
// Do not clear begin/end text from an alternate operator because it could
// change the actual regexp properties.
if re.Op == syntax.OpAlternate {
return
}
if len(re.Sub) == 0 {
return
}
if len(re.Sub) == 1 {
if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText {
// We need to remove this element. Since it's the only one, we convert it into a matcher of an empty string.
// OpEmptyMatch is regexp's nop operator.
re.Op = syntax.OpEmptyMatch
re.Sub = nil
return
}
}
if re.Sub[0].Op == syntax.OpBeginText {
re.Sub = re.Sub[1:]
}
if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText {
re.Sub = re.Sub[:len(re.Sub)-1]
}
}
// isCaseInsensitive tells if a regexp is case insensitive.
// The flag should be checked at each level of the syntax tree.
func isCaseInsensitive(reg *syntax.Regexp) bool {
return (reg.Flags & syntax.FoldCase) != 0
}
// isCaseSensitive tells if a regexp is case sensitive.
// The flag should be checked at each level of the syntax tree.
func isCaseSensitive(reg *syntax.Regexp) bool {
return !isCaseInsensitive(reg)
}
// tooManyMatches guards against creating too many set matches.
func tooManyMatches(matches []string, added ...string) bool {
return len(matches)+len(added) > maxSetMatches
}
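// As a sketch only (not part of this change), findSetMatches expands small character
// classes and alternations into explicit equality matches; a test exercising it would
// look like this next to the other tests in this package:
func TestFindSetMatchesSketch(t *testing.T) {
	parsed, err := syntax.Parse(`ab[0-2]d`, syntax.Perl)
	require.NoError(t, err)
	matches, caseSensitive := findSetMatches(parsed)
	require.Equal(t, []string{"ab0d", "ab1d", "ab2d"}, matches)
	require.True(t, caseSensitive)
}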
func (m *FastRegexMatcher) MatchString(s string) bool { func (m *FastRegexMatcher) MatchString(s string) bool {
if m.literal { return m.matchString(s)
return s == m.value }
}
if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { func (m *FastRegexMatcher) SetMatches() []string {
return false // IMPORTANT: always return a copy, otherwise if the caller manipulates this slice it will
} // also get manipulated in the cached FastRegexMatcher instance.
if m.suffix != "" && !strings.HasSuffix(s, m.suffix) { return slices.Clone(m.setMatches)
return false
}
if m.contains != "" && !strings.Contains(s, m.contains) {
return false
}
return m.re.MatchString(s)
} }
func (m *FastRegexMatcher) GetRegexString() string { func (m *FastRegexMatcher) GetRegexString() string {
if m.literal { return m.reString
return m.value
}
return m.re.String()
} }
func isLiteral(re string) bool { // optimizeAlternatingLiterals optimizes a regex of the form
return regexp.QuoteMeta(re) == re //
// `literal1|literal2|literal3|...`
//
// This function returns an optimized StringMatcher or nil if the regex
// cannot be optimized in this way, and a list of setMatches up to maxSetMatches.
func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
if len(s) == 0 {
return emptyStringMatcher{}, nil
}
estimatedAlternates := strings.Count(s, "|") + 1
// If there are no alternates, check if the string is a literal
if estimatedAlternates == 1 {
if regexp.QuoteMeta(s) == s {
return &equalStringMatcher{s: s, caseSensitive: true}, []string{s}
}
return nil, nil
}
multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates)
for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
// Split the string into the next literal and the remainder
subMatch := s[:end]
s = s[end+1:]
// break if any of the submatches are not literals
if regexp.QuoteMeta(subMatch) != subMatch {
return nil, nil
}
multiMatcher.add(subMatch)
}
// break if the remainder is not a literal
if regexp.QuoteMeta(s) != s {
return nil, nil
}
multiMatcher.add(s)
return multiMatcher, multiMatcher.setMatches()
} }
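// A sketch only (not part of this change) of the fast path above: a pure alternation
// of literals is matched without compiling a regexp at all, while a non-literal
// alternate falls back to the regular path:
func TestOptimizeAlternatingLiteralsSketch(t *testing.T) {
	m, sets := optimizeAlternatingLiterals("foo|bar|baz")
	require.NotNil(t, m)
	require.Equal(t, []string{"foo", "bar", "baz"}, sets)
	require.True(t, m.Matches("bar"))
	require.False(t, m.Matches("qux"))

	// "ba." is not a literal, so the optimization does not apply.
	m, sets = optimizeAlternatingLiterals("foo|ba.")
	require.Nil(t, m)
	require.Nil(t, sets)
}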
// optimizeConcatRegex returns literal prefix/suffix text that can be safely // optimizeConcatRegex returns literal prefix/suffix text that can be safely
@ -123,3 +398,540 @@ func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
return return
} }
// StringMatcher is a matcher that matches a string in place of a regular expression.
type StringMatcher interface {
Matches(s string) bool
}
// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher.
// It returns nil if the regexp is not supported.
func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
clearBeginEndText(re)
m := stringMatcherFromRegexpInternal(re)
m = optimizeEqualStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
return m
}
func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
clearCapture(re)
switch re.Op {
case syntax.OpBeginText:
// Correctly handling the begin text operator inside a regex is tricky,
// so in this case we fall back to the regex engine.
return nil
case syntax.OpEndText:
// Correctly handling the end text operator inside a regex is tricky,
// so in this case we fall back to the regex engine.
return nil
case syntax.OpPlus:
if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
return nil
}
return &anyNonEmptyStringMatcher{
matchNL: re.Sub[0].Op == syntax.OpAnyChar,
}
case syntax.OpStar:
if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
return nil
}
// If the newline is valid, then this matcher literally matches any string (even an empty one).
if re.Sub[0].Op == syntax.OpAnyChar {
return trueMatcher{}
}
// Any string is fine (including an empty one), as long as it doesn't contain any newline.
return anyStringWithoutNewlineMatcher{}
case syntax.OpQuest:
// Only optimize for ".?".
if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) {
return nil
}
return &zeroOrOneCharacterStringMatcher{
matchNL: re.Sub[0].Op == syntax.OpAnyChar,
}
case syntax.OpEmptyMatch:
return emptyStringMatcher{}
case syntax.OpLiteral:
return &equalStringMatcher{
s: string(re.Rune),
caseSensitive: !isCaseInsensitive(re),
}
case syntax.OpAlternate:
or := make([]StringMatcher, 0, len(re.Sub))
for _, sub := range re.Sub {
m := stringMatcherFromRegexpInternal(sub)
if m == nil {
return nil
}
or = append(or, m)
}
return orStringMatcher(or)
case syntax.OpConcat:
clearCapture(re.Sub...)
if len(re.Sub) == 0 {
return emptyStringMatcher{}
}
if len(re.Sub) == 1 {
return stringMatcherFromRegexpInternal(re.Sub[0])
}
var left, right StringMatcher
// Let's try to find whether there are leading and trailing any-matchers.
if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest {
left = stringMatcherFromRegexpInternal(re.Sub[0])
if left == nil {
return nil
}
re.Sub = re.Sub[1:]
}
if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest {
right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1])
if right == nil {
return nil
}
re.Sub = re.Sub[:len(re.Sub)-1]
}
matches, matchesCaseSensitive := findSetMatchesInternal(re, "")
if len(matches) == 0 && len(re.Sub) == 2 {
// We have not found fixed set matches. We look for other known cases that
// we can optimize.
switch {
// Prefix is literal.
case right == nil && re.Sub[0].Op == syntax.OpLiteral:
right = stringMatcherFromRegexpInternal(re.Sub[1])
if right != nil {
matches = []string{string(re.Sub[0].Rune)}
matchesCaseSensitive = !isCaseInsensitive(re.Sub[0])
}
// Suffix is literal.
case left == nil && re.Sub[1].Op == syntax.OpLiteral:
left = stringMatcherFromRegexpInternal(re.Sub[0])
if left != nil {
matches = []string{string(re.Sub[1].Rune)}
matchesCaseSensitive = !isCaseInsensitive(re.Sub[1])
}
}
}
// Ensure we've found some literals to match (optionally with a left and/or right matcher).
// If not, then this optimization doesn't trigger.
if len(matches) == 0 {
return nil
}
// Use the right (and best) matcher based on what we've found.
switch {
// No left and right matchers (only fixed set matches).
case left == nil && right == nil:
// If there are no any-matchers on either side, it's a concat of literals.
or := make([]StringMatcher, 0, len(matches))
for _, match := range matches {
or = append(or, &equalStringMatcher{
s: match,
caseSensitive: matchesCaseSensitive,
})
}
return orStringMatcher(or)
// Right matcher with 1 fixed set match.
case left == nil && len(matches) == 1:
return &literalPrefixStringMatcher{
prefix: matches[0],
prefixCaseSensitive: matchesCaseSensitive,
right: right,
}
// Left matcher with 1 fixed set match.
case right == nil && len(matches) == 1:
return &literalSuffixStringMatcher{
left: left,
suffix: matches[0],
suffixCaseSensitive: matchesCaseSensitive,
}
// We found literals in the middle. We can trigger the fast path only if
// the matches are case sensitive because containsStringMatcher doesn't
// support case-insensitive matching.
case matchesCaseSensitive:
return &containsStringMatcher{
substrings: matches,
left: left,
right: right,
}
}
}
return nil
}
// containsStringMatcher matches a string if it contains any of the substrings.
// If left and right are not nil, it's a contains operation where left and right must match.
// If left is nil, it's a hasPrefix operation and right must match.
// Finally, if right is nil it's a hasSuffix operation and left must match.
type containsStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
// At least one of these strings must match in the "middle", between left and right matchers.
substrings []string
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *containsStringMatcher) Matches(s string) bool {
for _, substr := range m.substrings {
switch {
case m.right != nil && m.left != nil:
searchStartPos := 0
for {
pos := strings.Index(s[searchStartPos:], substr)
if pos < 0 {
break
}
// Since we started searching from searchStartPos, we have to add that offset
// to get the actual position of the substring inside the text.
pos += searchStartPos
// If both the left and right matchers match, then we can stop searching because
// we've found a match.
if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) {
return true
}
// Continue searching for another occurrence of the substring inside the text.
searchStartPos = pos + 1
}
case m.left != nil:
// If we have to check for characters on the left then we need to match a suffix.
if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) {
return true
}
case m.right != nil:
if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) {
return true
}
}
}
return false
}
// literalPrefixStringMatcher matches a string with the given literal prefix and right side matcher.
type literalPrefixStringMatcher struct {
prefix string
prefixCaseSensitive bool
// The matcher that must match the right side. Can be nil.
right StringMatcher
}
func (m *literalPrefixStringMatcher) Matches(s string) bool {
// Ensure the prefix matches.
if m.prefixCaseSensitive && !strings.HasPrefix(s, m.prefix) {
return false
}
if !m.prefixCaseSensitive && !hasPrefixCaseInsensitive(s, m.prefix) {
return false
}
// Ensure the right side matches.
return m.right.Matches(s[len(m.prefix):])
}
// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher.
type literalSuffixStringMatcher struct {
// The matcher that must match the left side. Can be nil.
left StringMatcher
suffix string
suffixCaseSensitive bool
}
func (m *literalSuffixStringMatcher) Matches(s string) bool {
// Ensure the suffix matches.
if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) {
return false
}
if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) {
return false
}
// Ensure the left side matches.
return m.left.Matches(s[:len(s)-len(m.suffix)])
}
// emptyStringMatcher matches an empty string.
type emptyStringMatcher struct{}
func (m emptyStringMatcher) Matches(s string) bool {
return len(s) == 0
}
// orStringMatcher matches any of the sub-matchers.
type orStringMatcher []StringMatcher
func (m orStringMatcher) Matches(s string) bool {
for _, matcher := range m {
if matcher.Matches(s) {
return true
}
}
return false
}
// equalStringMatcher matches a string exactly and supports case-insensitive matching.
type equalStringMatcher struct {
s string
caseSensitive bool
}
func (m *equalStringMatcher) Matches(s string) bool {
if m.caseSensitive {
return m.s == s
}
return strings.EqualFold(m.s, s)
}
type multiStringMatcherBuilder interface {
StringMatcher
add(s string)
setMatches() []string
}
func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize int) multiStringMatcherBuilder {
// If the estimated size is low enough, it's faster to use a slice instead of a map.
if estimatedSize < minEqualMultiStringMatcherMapThreshold {
return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
}
return &equalMultiStringMapMatcher{
values: make(map[string]struct{}, estimatedSize),
caseSensitive: caseSensitive,
}
}
// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values.
type equalMultiStringSliceMatcher struct {
values []string
caseSensitive bool
}
func (m *equalMultiStringSliceMatcher) add(s string) {
m.values = append(m.values, s)
}
func (m *equalMultiStringSliceMatcher) setMatches() []string {
return m.values
}
func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
if m.caseSensitive {
for _, v := range m.values {
if s == v {
return true
}
}
} else {
for _, v := range m.values {
if strings.EqualFold(s, v) {
return true
}
}
}
return false
}
// equalMultiStringMapMatcher matches a string exactly against a map of valid values.
type equalMultiStringMapMatcher struct {
// values contains values to match a string against. If the matching is case insensitive,
// the values here must be lowercase.
values map[string]struct{}
caseSensitive bool
}
func (m *equalMultiStringMapMatcher) add(s string) {
if !m.caseSensitive {
s = strings.ToLower(s)
}
m.values[s] = struct{}{}
}
func (m *equalMultiStringMapMatcher) setMatches() []string {
if len(m.values) >= maxSetMatches {
return nil
}
matches := make([]string, 0, len(m.values))
for s := range m.values {
matches = append(matches, s)
}
return matches
}
func (m *equalMultiStringMapMatcher) Matches(s string) bool {
if !m.caseSensitive {
s = strings.ToLower(s)
}
_, ok := m.values[s]
return ok
}
// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
// (including an empty one) as long as it doesn't contain any newline character.
type anyStringWithoutNewlineMatcher struct{}
func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
// We need to make sure it doesn't contain a newline. Since the newline is
// an ASCII character, we can use strings.IndexByte().
return strings.IndexByte(s, '\n') == -1
}
// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string.
type anyNonEmptyStringMatcher struct {
matchNL bool
}
func (m *anyNonEmptyStringMatcher) Matches(s string) bool {
if m.matchNL {
// It's OK if the string contains a newline so we just need to make
// sure it's non-empty.
return len(s) > 0
}
// We need to make sure it's non-empty and doesn't contain a newline.
// Since the newline is an ASCII character, we can use strings.IndexByte().
return len(s) > 0 && strings.IndexByte(s, '\n') == -1
}
// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence
// of any character. The newline character is matched only if matchNL is set to true.
type zeroOrOneCharacterStringMatcher struct {
matchNL bool
}
func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
// Zero or one.
if len(s) > 1 {
return false
}
// No need to check for the newline if the string is empty or matching a newline is OK.
if m.matchNL || len(s) == 0 {
return true
}
return s[0] != '\n'
}
// trueMatcher is a stringMatcher which matches any string (always returns true).
type trueMatcher struct{}
func (m trueMatcher) Matches(_ string) bool {
return true
}
// optimizeEqualStringMatchers optimizes a specific case where all matchers are made of an
// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher). In
// this specific case, when we have many strings to match against we can use a map instead
// of iterating over the list of strings.
func optimizeEqualStringMatchers(input StringMatcher, threshold int) StringMatcher {
var (
caseSensitive bool
caseSensitiveSet bool
numValues int
)
// Analyse the input StringMatcher to count the number of occurrences
// and ensure all of them have the same case sensitivity.
analyseCallback := func(matcher *equalStringMatcher) bool {
// Ensure we don't have mixed case sensitivity.
if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
return false
} else if !caseSensitiveSet {
caseSensitive = matcher.caseSensitive
caseSensitiveSet = true
}
numValues++
return true
}
if !findEqualStringMatchers(input, analyseCallback) {
return input
}
// If the number of values found is less than the threshold, then we should skip the optimization.
if numValues < threshold {
return input
}
// Parse the input StringMatcher again to extract all values and store them.
// We can skip the case sensitivity check because we've already checked it, and
// if the code reaches this point it means all matchers have the same case sensitivity.
multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues)
// Ignore the return value because we already iterated over the input StringMatcher
// and it was all good.
findEqualStringMatchers(input, func(matcher *equalStringMatcher) bool {
multiMatcher.add(matcher.s)
return true
})
return multiMatcher
}
// findEqualStringMatchers analyzes the input StringMatcher and calls the callback for each
// equalStringMatcher found. It returns true if and only if the input StringMatcher is *only*
// composed of an alternation of equalStringMatchers.
func findEqualStringMatchers(input StringMatcher, callback func(matcher *equalStringMatcher) bool) bool {
orInput, ok := input.(orStringMatcher)
if !ok {
return false
}
for _, m := range orInput {
switch casted := m.(type) {
case orStringMatcher:
if !findEqualStringMatchers(m, callback) {
return false
}
case *equalStringMatcher:
if !callback(casted) {
return false
}
default:
// It's not an equal string matcher, so we have to stop searching
// because this optimization can't be applied.
return false
}
}
return true
}
func hasPrefixCaseInsensitive(s, prefix string) bool {
return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
}
func hasSuffixCaseInsensitive(s, suffix string) bool {
return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
}
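How these pieces combine through the exported API, as a hedged sketch (import path as in this repository; the inline results follow from the code in this file):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// Alternation of short literals: fully reduced to set matches.
	m, err := labels.NewFastRegexMatcher("a(b|f)")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.SetMatches())      // [ab af]
	fmt.Println(m.MatchString("af")) // true

	// Literal in the middle: matched with substring search instead of the regexp engine.
	c, err := labels.NewFastRegexMatcher(".*foo.*")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.IsOptimized())        // true
	fmt.Println(c.MatchString("xfooy")) // true
}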

File diff suppressed because one or more lines are too long

View file

@ -115,6 +115,12 @@ func (e ErrStorage) Error() string {
return e.Err.Error() return e.Err.Error()
} }
// QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked.
type QueryEngine interface {
NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
}
// QueryLogger is an interface that can be used to log all the queries logged // QueryLogger is an interface that can be used to log all the queries logged
// by the engine. // by the engine.
type QueryLogger interface { type QueryLogger interface {
@ -1196,6 +1202,9 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
if prepSeries != nil { if prepSeries != nil {
bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si])
} }
// Don't add histogram size here because we only
// copy the pointer above, not the whole
// histogram.
ev.currentSamples++ ev.currentSamples++
if ev.currentSamples > ev.maxSamples { if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
@ -1221,7 +1230,6 @@ func (ev *evaluator) rangeEval(prepSeries func(labels.Labels, *EvalSeriesHelper)
if ev.currentSamples > ev.maxSamples { if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.samplesStats.UpdatePeak(ev.currentSamples)
// If this could be an instant query, shortcut so as not to change sort order. // If this could be an instant query, shortcut so as not to change sort order.
if ev.endTimestamp == ev.startTimestamp { if ev.endTimestamp == ev.startTimestamp {
@ -1540,13 +1548,12 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
histSamples := totalHPointSize(ss.Histograms) histSamples := totalHPointSize(ss.Histograms)
if len(ss.Floats)+histSamples > 0 { if len(ss.Floats)+histSamples > 0 {
if ev.currentSamples+len(ss.Floats)+histSamples <= ev.maxSamples { if ev.currentSamples+len(ss.Floats)+histSamples > ev.maxSamples {
mat = append(mat, ss)
prevSS = &mat[len(mat)-1]
ev.currentSamples += len(ss.Floats) + histSamples
} else {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
mat = append(mat, ss)
prevSS = &mat[len(mat)-1]
ev.currentSamples += len(ss.Floats) + histSamples
} }
ev.samplesStats.UpdatePeak(ev.currentSamples) ev.samplesStats.UpdatePeak(ev.currentSamples)
@ -1709,26 +1716,28 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
step++ step++
_, f, h, ok := ev.vectorSelectorSingle(it, e, ts) _, f, h, ok := ev.vectorSelectorSingle(it, e, ts)
if ok { if ok {
if ev.currentSamples < ev.maxSamples { if h == nil {
if h == nil { ev.currentSamples++
if ss.Floats == nil { ev.samplesStats.IncrementSamplesAtStep(step, 1)
ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) if ev.currentSamples > ev.maxSamples {
} ev.error(ErrTooManySamples(env))
ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
ev.currentSamples++
ev.samplesStats.IncrementSamplesAtStep(step, 1)
} else {
if ss.Histograms == nil {
ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps)
}
point := HPoint{H: h, T: ts}
ss.Histograms = append(ss.Histograms, point)
histSize := point.size()
ev.currentSamples += histSize
ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize))
} }
if ss.Floats == nil {
ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps)
}
ss.Floats = append(ss.Floats, FPoint{F: f, T: ts})
} else { } else {
ev.error(ErrTooManySamples(env)) point := HPoint{H: h, T: ts}
histSize := point.size()
ev.currentSamples += histSize
ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize))
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
if ss.Histograms == nil {
ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps)
}
ss.Histograms = append(ss.Histograms, point)
} }
} }
} }
@ -1856,7 +1865,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, annotations.Annotatio
panic(fmt.Errorf("unhandled expression of type: %T", expr)) panic(fmt.Errorf("unhandled expression of type: %T", expr))
} }
// reuseOrGetFPointSlices reuses the space from previous slice to create new slice if the former has lots of room. // reuseOrGetHPointSlices reuses the space from previous slice to create new slice if the former has lots of room.
// The previous slices capacity is adjusted so when it is re-used from the pool it doesn't overflow into the new one. // The previous slices capacity is adjusted so when it is re-used from the pool it doesn't overflow into the new one.
func reuseOrGetHPointSlices(prevSS *Series, numSteps int) (r []HPoint) { func reuseOrGetHPointSlices(prevSS *Series, numSteps int) (r []HPoint) {
if prevSS != nil && cap(prevSS.Histograms)-2*len(prevSS.Histograms) > 0 { if prevSS != nil && cap(prevSS.Histograms)-2*len(prevSS.Histograms) > 0 {
@ -2168,10 +2177,10 @@ loop:
histograms = histograms[:n] histograms = histograms[:n]
continue loop continue loop
} }
if ev.currentSamples >= ev.maxSamples { ev.currentSamples += histograms[n].size()
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.currentSamples += histograms[n].size()
} }
case chunkenc.ValFloat: case chunkenc.ValFloat:
t, f := buf.At() t, f := buf.At()
@ -2180,10 +2189,10 @@ loop:
} }
// Values in the buffer are guaranteed to be smaller than maxt. // Values in the buffer are guaranteed to be smaller than maxt.
if t >= mintFloats { if t >= mintFloats {
if ev.currentSamples >= ev.maxSamples { ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.currentSamples++
if floats == nil { if floats == nil {
floats = getFPointSlice(16) floats = getFPointSlice(16)
} }
@ -2211,22 +2220,22 @@ loop:
histograms = histograms[:n] histograms = histograms[:n]
break break
} }
if ev.currentSamples >= ev.maxSamples { ev.currentSamples += histograms[n].size()
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
ev.currentSamples += histograms[n].size()
case chunkenc.ValFloat: case chunkenc.ValFloat:
t, f := it.At() t, f := it.At()
if t == maxt && !value.IsStaleNaN(f) { if t == maxt && !value.IsStaleNaN(f) {
if ev.currentSamples >= ev.maxSamples { ev.currentSamples++
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env)) ev.error(ErrTooManySamples(env))
} }
if floats == nil { if floats == nil {
floats = getFPointSlice(16) floats = getFPointSlice(16)
} }
floats = append(floats, FPoint{T: t, F: f}) floats = append(floats, FPoint{T: t, F: f})
ev.currentSamples++
} }
} }
ev.samplesStats.UpdatePeak(ev.currentSamples) ev.samplesStats.UpdatePeak(ev.currentSamples)
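The recurring edit in this file moves the limit check after the sample cost has been added, so a whole histogram's size is counted before comparing against the limit and the comparison becomes a strict greater-than. A minimal sketch of the pattern (names are illustrative, not the engine's actual types):

package main

import (
	"errors"
	"fmt"
)

var errTooManySamples = errors.New("query processing would load too many samples into memory")

// addSampleCost counts the cost first (1 for a float, the histogram's size for a
// histogram), then checks the limit, mirroring the "count, then compare" order above.
func addSampleCost(current, maxSamples, cost int) (int, error) {
	current += cost
	if current > maxSamples {
		return current, errTooManySamples
	}
	return current, nil
}

func main() {
	cur := 0
	var err error
	for _, cost := range []int{1, 12, 1} { // a float, a histogram of size 12, another float
		if cur, err = addSampleCost(cur, 13, cost); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println("peak samples:", cur)
}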

View file

@ -755,6 +755,7 @@ load 10s
metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100 metricWith3SampleEvery10Seconds{a="1",b="1"} 1+1x100
metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100 metricWith3SampleEvery10Seconds{a="2",b="2"} 1+1x100
metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100 metricWith3SampleEvery10Seconds{a="3",b="2"} 1+1x100
metricWith1HistogramEvery10Seconds {{schema:1 count:5 sum:20 buckets:[1 2 1 1]}}+{{schema:1 count:10 sum:5 buckets:[1 2 3 4]}}x100
`) `)
t.Cleanup(func() { storage.Close() }) t.Cleanup(func() { storage.Close() })
@ -795,6 +796,15 @@ load 10s
21000: 1, 21000: 1,
}, },
}, },
{
Query: "metricWith1HistogramEvery10Seconds",
Start: time.Unix(21, 0),
PeakSamples: 12,
TotalSamples: 12, // 1 histogram sample of size 12 / 10 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
21000: 12,
},
},
{ {
// timestamp function has a special handling. // timestamp function has a special handling.
Query: "timestamp(metricWith1SampleEvery10Seconds)", Query: "timestamp(metricWith1SampleEvery10Seconds)",
@ -805,6 +815,15 @@ load 10s
21000: 1, 21000: 1,
}, },
}, },
{
Query: "timestamp(metricWith1HistogramEvery10Seconds)",
Start: time.Unix(21, 0),
PeakSamples: 13, // histogram size 12 + 1 extra because of timestamp
TotalSamples: 1, // 1 float sample (because of timestamp) / 10 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
21000: 1,
},
},
{ {
Query: "metricWith1SampleEvery10Seconds", Query: "metricWith1SampleEvery10Seconds",
Start: time.Unix(22, 0), Start: time.Unix(22, 0),
@ -877,11 +896,20 @@ load 10s
201000: 6, 201000: 6,
}, },
}, },
{
Query: "metricWith1HistogramEvery10Seconds[60s]",
Start: time.Unix(201, 0),
PeakSamples: 72,
TotalSamples: 72, // 1 histogram (size 12) / 10 seconds * 60 seconds
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 72,
},
},
{ {
Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]", Query: "max_over_time(metricWith1SampleEvery10Seconds[59s])[20s:5s]",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 10, PeakSamples: 10,
TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 60/5 (using 59s so we always return 6 samples TotalSamples: 24, // (1 sample / 10 seconds * 60 seconds) * 20/5 (using 59s so we always return 6 samples
// as if we run a query on 00 looking back 60 seconds we will return 7 samples; // as if we run a query on 00 looking back 60 seconds we will return 7 samples;
// see next test). // see next test).
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
@ -892,12 +920,22 @@ load 10s
Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]", Query: "max_over_time(metricWith1SampleEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
PeakSamples: 11, PeakSamples: 11,
TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) + 2 as TotalSamples: 26, // (1 sample / 10 seconds * 60 seconds) * 4 + 2 as
// max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples. // max_over_time(metricWith1SampleEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 26, 201000: 26,
}, },
}, },
{
Query: "max_over_time(metricWith1HistogramEvery10Seconds[60s])[20s:5s]",
Start: time.Unix(201, 0),
PeakSamples: 72,
TotalSamples: 312, // (1 histogram (size 12) / 10 seconds * 60 seconds) * 4 + 2 * 12 as
// max_over_time(metricWith1HistogramEvery10Seconds[60s]) @ 190 and 200 will return 7 samples.
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 312,
},
},
{ {
Query: "metricWith1SampleEvery10Seconds[60s] @ 30", Query: "metricWith1SampleEvery10Seconds[60s] @ 30",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
@ -907,6 +945,15 @@ load 10s
201000: 4, 201000: 4,
}, },
}, },
{
Query: "metricWith1HistogramEvery10Seconds[60s] @ 30",
Start: time.Unix(201, 0),
PeakSamples: 48,
TotalSamples: 48, // @ modifier forces the evaluation at 30 seconds, so it brings 4 datapoints (0, 10, 20, 30 seconds) * 1 series
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 48,
},
},
{ {
Query: "sum(max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))", Query: "sum(max_over_time(metricWith3SampleEvery10Seconds[60s] @ 30))",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
@ -1035,13 +1082,42 @@ load 10s
}, },
}, },
{ {
// timestamp function as a special handling Query: `metricWith1HistogramEvery10Seconds`,
Start: time.Unix(204, 0),
End: time.Unix(223, 0),
Interval: 5 * time.Second,
PeakSamples: 48,
TotalSamples: 48, // 1 histogram (size 12) per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{
204000: 12, // aligned to the step time, not the sample time
209000: 12,
214000: 12,
219000: 12,
},
},
{
// timestamp function has special handling
Query: "timestamp(metricWith1SampleEvery10Seconds)", Query: "timestamp(metricWith1SampleEvery10Seconds)",
Start: time.Unix(201, 0), Start: time.Unix(201, 0),
End: time.Unix(220, 0), End: time.Unix(220, 0),
Interval: 5 * time.Second, Interval: 5 * time.Second,
PeakSamples: 5, PeakSamples: 5,
TotalSamples: 4, // (1 sample / 10 seconds) * 4 steps TotalSamples: 4, // 1 sample per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 1,
206000: 1,
211000: 1,
216000: 1,
},
},
{
// timestamp function has special handling
Query: "timestamp(metricWith1HistogramEvery10Seconds)",
Start: time.Unix(201, 0),
End: time.Unix(220, 0),
Interval: 5 * time.Second,
PeakSamples: 16,
TotalSamples: 4, // 1 sample per query * 4 steps
TotalSamplesPerStep: stats.TotalSamplesPerStep{ TotalSamplesPerStep: stats.TotalSamplesPerStep{
201000: 1, 201000: 1,
206000: 1, 206000: 1,
@ -3438,7 +3514,39 @@ func TestNativeHistogram_HistogramStdDevVar(t *testing.T) {
}, },
NegativeBuckets: []int64{1, 0}, NegativeBuckets: []int64{1, 0},
}, },
stdVar: 1544.8582535368798, // actual variance: 1738.4082 stdVar: 1844.4651144196398, // actual variance: 1738.4082
},
{
name: "-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3",
h: &histogram.Histogram{
Count: 10,
ZeroCount: 0,
Sum: -112946,
Schema: 0,
NegativeSpans: []histogram.Span{
{Offset: 2, Length: 3},
{Offset: 1, Length: 2},
{Offset: 2, Length: 1},
{Offset: 3, Length: 1},
{Offset: 2, Length: 1},
},
NegativeBuckets: []int64{1, 0, 0, 0, 0, 2, -2, 0},
},
stdVar: 759352122.1939945, // actual variance: 882690990
},
{
name: "-10 x10",
h: &histogram.Histogram{
Count: 10,
ZeroCount: 0,
Sum: -100,
Schema: 0,
NegativeSpans: []histogram.Span{
{Offset: 4, Length: 1},
},
NegativeBuckets: []int64{10},
},
stdVar: 1.725830020304794, // actual variance: 0
}, },
{ {
name: "-50, -8, 0, 3, 8, 9, 100, NaN", name: "-50, -8, 0, 3, 8, 9, 100, NaN",

View file

@ -1111,11 +1111,17 @@ func funcHistogramStdDev(vals []parser.Value, args parser.Expressions, enh *Eval
it := sample.H.AllBucketIterator() it := sample.H.AllBucketIterator()
for it.Next() { for it.Next() {
bucket := it.At() bucket := it.At()
if bucket.Count == 0 {
continue
}
var val float64 var val float64
if bucket.Lower <= 0 && 0 <= bucket.Upper { if bucket.Lower <= 0 && 0 <= bucket.Upper {
val = 0 val = 0
} else { } else {
val = math.Sqrt(bucket.Upper * bucket.Lower) val = math.Sqrt(bucket.Upper * bucket.Lower)
if bucket.Upper < 0 {
val = -val
}
} }
delta := val - mean delta := val - mean
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)
@ -1144,11 +1150,17 @@ func funcHistogramStdVar(vals []parser.Value, args parser.Expressions, enh *Eval
it := sample.H.AllBucketIterator() it := sample.H.AllBucketIterator()
for it.Next() { for it.Next() {
bucket := it.At() bucket := it.At()
if bucket.Count == 0 {
continue
}
var val float64 var val float64
if bucket.Lower <= 0 && 0 <= bucket.Upper { if bucket.Lower <= 0 && 0 <= bucket.Upper {
val = 0 val = 0
} else { } else {
val = math.Sqrt(bucket.Upper * bucket.Lower) val = math.Sqrt(bucket.Upper * bucket.Lower)
if bucket.Upper < 0 {
val = -val
}
} }
delta := val - mean delta := val - mean
variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance) variance, cVariance = kahanSumInc(bucket.Count*delta*delta, variance, cVariance)

View file

@ -3706,7 +3706,31 @@ func TestParseExpressions(t *testing.T) {
if !test.fail { if !test.fail {
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, test.expected, expr, "error on input '%s'", test.input) expected := test.expected
// The FastRegexMatcher is not comparable with a deep equal, so only compare its String() version.
if actualVector, ok := expr.(*VectorSelector); ok {
require.IsType(t, &VectorSelector{}, test.expected, "error on input '%s'", test.input)
expectedVector := test.expected.(*VectorSelector)
require.Len(t, actualVector.LabelMatchers, len(expectedVector.LabelMatchers), "error on input '%s'", test.input)
for i := 0; i < len(actualVector.LabelMatchers); i++ {
expectedMatcher := expectedVector.LabelMatchers[i].String()
actualMatcher := actualVector.LabelMatchers[i].String()
require.Equal(t, expectedMatcher, actualMatcher, "unexpected label matcher '%s' on input '%s'", actualMatcher, test.input)
}
// Make a shallow copy of the expected expr (because the test cases are defined in a global variable)
// and then reset the LabelMatchers so they are not compared by the following deep equal.
expectedCopy := *expectedVector
expectedCopy.LabelMatchers = nil
expected = &expectedCopy
actualVector.LabelMatchers = nil
}
require.Equal(t, expected, expr, "error on input '%s'", test.input)
} else { } else {
require.Error(t, err) require.Error(t, err)
require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error()) require.Contains(t, err.Error(), test.errMsg, "unexpected error on input '%s', expected '%s', got '%s'", test.input, test.errMsg, err.Error())
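A hedged illustration of the comment above about FastRegexMatcher (a sketch, not part of the diff): two matchers built from the same regexp hold separately compiled state, so a deep equality check on them is unreliable, while their String() renderings are stable.

package main

import (
	"fmt"
	"reflect"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	m1 := labels.MustNewMatcher(labels.MatchRegexp, "job", "api.*")
	m2 := labels.MustNewMatcher(labels.MatchRegexp, "job", "api.*")
	fmt.Println(reflect.DeepEqual(m1, m2))  // may be false despite identical inputs
	fmt.Println(m1.String() == m2.String()) // true: both render as job=~"api.*"
}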

View file

@ -46,6 +46,7 @@ var (
patSpace = regexp.MustCompile("[\t ]+") patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`) patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
patEvalRange = regexp.MustCompile(`^eval(?:_(fail))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`)
) )
const ( const (
@ -72,7 +73,7 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
} }
// RunBuiltinTests runs an acceptance test suite against the provided engine. // RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine engineQuerier) { func RunBuiltinTests(t *testing.T, engine QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false }) t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
@ -89,11 +90,19 @@ func RunBuiltinTests(t *testing.T, engine engineQuerier) {
} }
// RunTest parses and runs the test against the provided engine. // RunTest parses and runs the test against the provided engine.
func RunTest(t testutil.T, input string, engine engineQuerier) { func RunTest(t testutil.T, input string, engine QueryEngine) {
test, err := newTest(t, input) require.NoError(t, runTest(t, input, engine))
require.NoError(t, err) }
func runTest(t testutil.T, input string, engine QueryEngine) error {
test, err := newTest(t, input)
// Why do this before checking err? newTest() can create the test storage and then return an error,
// and we want to make sure to clean that up to avoid leaking goroutines.
defer func() { defer func() {
if test == nil {
return
}
if test.storage != nil { if test.storage != nil {
test.storage.Close() test.storage.Close()
} }
@ -102,11 +111,19 @@ func RunTest(t testutil.T, input string, engine engineQuerier) {
} }
}() }()
for _, cmd := range test.cmds { if err != nil {
// TODO(fabxc): aggregate command errors, yield diffs for result return err
// comparison errors.
require.NoError(t, test.exec(cmd, engine))
} }
for _, cmd := range test.cmds {
if err := test.exec(cmd, engine); err != nil {
// TODO(fabxc): aggregate command errors, yield diffs for result
// comparison errors.
return err
}
}
return nil
} }
// test is a sequence of read and write commands that are run // test is a sequence of read and write commands that are run
@ -137,11 +154,6 @@ func newTest(t testutil.T, input string) (*test, error) {
//go:embed testdata //go:embed testdata
var testsFs embed.FS var testsFs embed.FS
type engineQuerier interface {
NewRangeQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, start, end time.Time, interval time.Duration) (Query, error)
NewInstantQuery(ctx context.Context, q storage.Queryable, opts QueryOpts, qs string, ts time.Time) (Query, error)
}
func raise(line int, format string, v ...interface{}) error { func raise(line int, format string, v ...interface{}) error {
return &parser.ParseErr{ return &parser.ParseErr{
LineOffset: line, LineOffset: line,
@ -188,15 +200,26 @@ func parseSeries(defLine string, line int) (labels.Labels, []parser.SequenceValu
} }
func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
if !patEvalInstant.MatchString(lines[i]) { instantParts := patEvalInstant.FindStringSubmatch(lines[i])
return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>") rangeParts := patEvalRange.FindStringSubmatch(lines[i])
if instantParts == nil && rangeParts == nil {
return i, nil, raise(i, "invalid evaluation command. Must be either 'eval[_fail|_ordered] instant [at <offset:duration>] <query>' or 'eval[_fail] range from <from> to <to> step <step> <query>'")
} }
parts := patEvalInstant.FindStringSubmatch(lines[i])
var ( isInstant := instantParts != nil
mod = parts[1]
at = parts[2] var mod string
expr = parts[3] var expr string
)
if isInstant {
mod = instantParts[1]
expr = instantParts[3]
} else {
mod = rangeParts[1]
expr = rangeParts[5]
}
_, err := parser.ParseExpr(expr) _, err := parser.ParseExpr(expr)
if err != nil { if err != nil {
parser.EnrichParseError(err, func(parseErr *parser.ParseErr) { parser.EnrichParseError(err, func(parseErr *parser.ParseErr) {
@ -209,15 +232,54 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
return i, nil, err return i, nil, err
} }
offset, err := model.ParseDuration(at) formatErr := func(format string, args ...any) error {
if err != nil { combinedArgs := []any{expr, i + 1}
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
} combinedArgs = append(combinedArgs, args...)
ts := testStartTime.Add(time.Duration(offset)) return fmt.Errorf("error in eval %s (line %v): "+format, combinedArgs...)
}
var cmd *evalCmd
if isInstant {
at := instantParts[2]
offset, err := model.ParseDuration(at)
if err != nil {
return i, nil, formatErr("invalid timestamp definition %q: %s", at, err)
}
ts := testStartTime.Add(time.Duration(offset))
cmd = newInstantEvalCmd(expr, ts, i+1)
} else {
from := rangeParts[2]
to := rangeParts[3]
step := rangeParts[4]
parsedFrom, err := model.ParseDuration(from)
if err != nil {
return i, nil, formatErr("invalid start timestamp definition %q: %s", from, err)
}
parsedTo, err := model.ParseDuration(to)
if err != nil {
return i, nil, formatErr("invalid end timestamp definition %q: %s", to, err)
}
if parsedTo < parsedFrom {
return i, nil, formatErr("invalid test definition, end timestamp (%s) is before start timestamp (%s)", to, from)
}
parsedStep, err := model.ParseDuration(step)
if err != nil {
return i, nil, formatErr("invalid step definition %q: %s", step, err)
}
cmd = newRangeEvalCmd(expr, testStartTime.Add(time.Duration(parsedFrom)), testStartTime.Add(time.Duration(parsedTo)), time.Duration(parsedStep), i+1)
}
cmd := newEvalCmd(expr, ts, i+1)
switch mod { switch mod {
case "ordered": case "ordered":
// Ordered results are not supported for range queries, but the regex for range query commands does not allow
// asserting an ordered result, so we don't need to do any error checking here.
cmd.ordered = true cmd.ordered = true
case "fail": case "fail":
cmd.fail = true cmd.fail = true
@ -240,8 +302,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) {
} }
// Currently, we are not expecting any matrices. // Currently, we are not expecting any matrices.
if len(vals) > 1 { if len(vals) > 1 && isInstant {
return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed") return i, nil, formatErr("expecting multiple values in instant evaluation not allowed")
} }
cmd.expectMetric(j, metric, vals...) cmd.expectMetric(j, metric, vals...)
} }
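Taken together, patEvalRange and the branch above accept commands of the form eval[_fail] range from <from> to <to> step <step> <query>. A minimal test-script fragment in the new syntax (metric name and values are illustrative):

load 5m
  some_metric 0+10x10

eval range from 0 to 10m step 5m some_metric
  some_metric 0 10 20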
@ -375,8 +437,11 @@ func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
type evalCmd struct { type evalCmd struct {
expr string expr string
start time.Time start time.Time
end time.Time
step time.Duration
line int line int
isRange bool // if false, instant query
fail, ordered bool fail, ordered bool
metrics map[uint64]labels.Labels metrics map[uint64]labels.Labels
@ -392,7 +457,7 @@ func (e entry) String() string {
return fmt.Sprintf("%d: %s", e.pos, e.vals) return fmt.Sprintf("%d: %s", e.pos, e.vals)
} }
func newEvalCmd(expr string, start time.Time, line int) *evalCmd { func newInstantEvalCmd(expr string, start time.Time, line int) *evalCmd {
return &evalCmd{ return &evalCmd{
expr: expr, expr: expr,
start: start, start: start,
@ -403,6 +468,20 @@ func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
} }
} }
func newRangeEvalCmd(expr string, start, end time.Time, step time.Duration, line int) *evalCmd {
return &evalCmd{
expr: expr,
start: start,
end: end,
step: step,
line: line,
isRange: true,
metrics: map[uint64]labels.Labels{},
expected: map[uint64]entry{},
}
}
func (ev *evalCmd) String() string { func (ev *evalCmd) String() string {
return "eval" return "eval"
} }
@ -425,7 +504,77 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
func (ev *evalCmd) compareResult(result parser.Value) error { func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) { switch val := result.(type) {
case Matrix: case Matrix:
return errors.New("received range result on instant evaluation") if ev.ordered {
return fmt.Errorf("expected ordered result, but query returned a matrix")
}
if err := assertMatrixSorted(val); err != nil {
return err
}
seen := map[uint64]bool{}
for _, s := range val {
hash := s.Metric.Hash()
if _, ok := ev.metrics[hash]; !ok {
return fmt.Errorf("unexpected metric %s in result", s.Metric)
}
seen[hash] = true
exp := ev.expected[hash]
var expectedFloats []FPoint
var expectedHistograms []HPoint
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
if ts.After(ev.end) {
return fmt.Errorf("expected %v points for %s, but query time range cannot return this many points", len(exp.vals), ev.metrics[hash])
}
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
if e.Histogram != nil {
expectedHistograms = append(expectedHistograms, HPoint{T: t, H: e.Histogram})
} else if !e.Omitted {
expectedFloats = append(expectedFloats, FPoint{T: t, F: e.Value})
}
}
if len(expectedFloats) != len(s.Floats) || len(expectedHistograms) != len(s.Histograms) {
return fmt.Errorf("expected %v float points and %v histogram points for %s, but got %s", len(expectedFloats), len(expectedHistograms), ev.metrics[hash], formatSeriesResult(s))
}
for i, expected := range expectedFloats {
actual := s.Floats[i]
if expected.T != actual.T {
return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !almostEqual(actual.F, expected.F, defaultEpsilon) {
return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
}
}
for i, expected := range expectedHistograms {
actual := s.Histograms[i]
if expected.T != actual.T {
return fmt.Errorf("expected histogram value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !actual.H.Equals(expected.H) {
return fmt.Errorf("expected histogram value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.H, actual.H, formatSeriesResult(s))
}
}
}
for hash := range ev.expected {
if !seen[hash] {
return fmt.Errorf("expected metric %s not found", ev.metrics[hash])
}
}
case Vector: case Vector:
seen := map[uint64]bool{} seen := map[uint64]bool{}
@ -440,7 +589,13 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
} }
exp0 := exp.vals[0] exp0 := exp.vals[0]
expH := exp0.Histogram expH := exp0.Histogram
if (expH == nil) != (v.H == nil) || (expH != nil && !expH.Equals(v.H)) { if expH == nil && v.H != nil {
return fmt.Errorf("expected float value %v for %s but got histogram %s", exp0, v.Metric, HistogramTestExpression(v.H))
}
if expH != nil && v.H == nil {
return fmt.Errorf("expected histogram %s for %s but got float value %v", HistogramTestExpression(expH), v.Metric, v.F)
}
if expH != nil && !expH.Equals(v.H) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H)) return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
} }
if !almostEqual(exp0.Value, v.F, defaultEpsilon) { if !almostEqual(exp0.Value, v.F, defaultEpsilon) {
@ -477,6 +632,21 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return nil return nil
} }
func formatSeriesResult(s Series) string {
floatPlural := "s"
histogramPlural := "s"
if len(s.Floats) == 1 {
floatPlural = ""
}
if len(s.Histograms) == 1 {
histogramPlural = ""
}
return fmt.Sprintf("%v float point%s %v and %v histogram point%s %v", len(s.Floats), floatPlural, s.Floats, len(s.Histograms), histogramPlural, s.Histograms)
}
// HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil. // HistogramTestExpression returns TestExpression() for the given histogram or "" if the histogram is nil.
func HistogramTestExpression(h *histogram.FloatHistogram) string { func HistogramTestExpression(h *histogram.FloatHistogram) string {
if h != nil { if h != nil {
@ -561,7 +731,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
} }
// exec processes a single step of the test. // exec processes a single step of the test.
func (t *test) exec(tc testCommand, engine engineQuerier) error { func (t *test) exec(tc testCommand, engine QueryEngine) error {
switch cmd := tc.(type) { switch cmd := tc.(type) {
case *clearCmd: case *clearCmd:
t.clear() t.clear()
@ -578,74 +748,7 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
} }
case *evalCmd: case *evalCmd:
queries, err := atModifierTestCases(cmd.expr, cmd.start) return t.execEval(cmd, engine)
if err != nil {
return err
}
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
for _, iq := range queries {
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
if err != nil {
return err
}
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
continue
}
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
}
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return err
}
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
defer q.Close()
if cmd.ordered {
// Ordering isn't defined for range queries.
continue
}
mat := rangeRes.Value.(Matrix)
vec := make(Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
for _, point := range series.Floats {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
break
}
}
for _, point := range series.Histograms {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
break
}
}
}
if _, ok := res.Value.(Scalar); ok {
err = cmd.compareResult(Scalar{V: vec[0].F})
} else {
err = cmd.compareResult(vec)
}
if err != nil {
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
}
}
default: default:
panic("promql.Test.exec: unknown test command type") panic("promql.Test.exec: unknown test command type")
@ -653,6 +756,132 @@ func (t *test) exec(tc testCommand, engine engineQuerier) error {
return nil return nil
} }
func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
if cmd.isRange {
return t.execRangeEval(cmd, engine)
}
return t.execInstantEval(cmd, engine)
}
func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
q, err := engine.NewRangeQuery(t.context, t.storage, nil, cmd.expr, cmd.start, cmd.end, cmd.step)
if err != nil {
return err
}
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
return nil
}
return fmt.Errorf("error evaluating query %q (line %d): %w", cmd.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
}
defer q.Close()
if err := cmd.compareResult(res.Value); err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, cmd.expr, cmd.line, err)
}
return nil
}
func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
queries, err := atModifierTestCases(cmd.expr, cmd.start)
if err != nil {
return err
}
queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...)
for _, iq := range queries {
q, err := engine.NewInstantQuery(t.context, t.storage, nil, iq.expr, iq.evalTime)
if err != nil {
return err
}
defer q.Close()
res := q.Exec(t.context)
if res.Err != nil {
if cmd.fail {
continue
}
return fmt.Errorf("error evaluating query %q (line %d): %w", iq.expr, cmd.line, res.Err)
}
if res.Err == nil && cmd.fail {
return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line)
}
err = cmd.compareResult(res.Value)
if err != nil {
return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err)
}
// Check query returns same result in range mode,
// by checking against the middle step.
q, err = engine.NewRangeQuery(t.context, t.storage, nil, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute)
if err != nil {
return err
}
rangeRes := q.Exec(t.context)
if rangeRes.Err != nil {
return fmt.Errorf("error evaluating query %q (line %d) in range mode: %w", iq.expr, cmd.line, rangeRes.Err)
}
defer q.Close()
if cmd.ordered {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
continue
}
mat := rangeRes.Value.(Matrix)
if err := assertMatrixSorted(mat); err != nil {
return err
}
vec := make(Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
for _, point := range series.Floats {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
break
}
}
for _, point := range series.Histograms {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
break
}
}
}
if _, ok := res.Value.(Scalar); ok {
err = cmd.compareResult(Scalar{V: vec[0].F})
} else {
err = cmd.compareResult(vec)
}
if err != nil {
return fmt.Errorf("error in %s %s (line %d) range mode: %w", cmd, iq.expr, cmd.line, err)
}
}
return nil
}
func assertMatrixSorted(m Matrix) error {
if len(m) <= 1 {
return nil
}
for i, s := range m[:len(m)-1] {
nextIndex := i + 1
nextMetric := m[nextIndex].Metric
if labels.Compare(s.Metric, nextMetric) > 0 {
return fmt.Errorf("matrix results should always be sorted by labels, but matrix is not sorted: series at index %v with labels %s sorts before series at index %v with labels %s", nextIndex, nextMetric, i, s.Metric)
}
}
return nil
}
// clear the current test storage of all inserted samples. // clear the current test storage of all inserted samples.
func (t *test) clear() { func (t *test) clear() {
if t.storage != nil { if t.storage != nil {
@ -704,8 +933,6 @@ func parseNumber(s string) (float64, error) {
// LazyLoader lazily loads samples into storage. // LazyLoader lazily loads samples into storage.
// This is specifically implemented for unit testing of rules. // This is specifically implemented for unit testing of rules.
type LazyLoader struct { type LazyLoader struct {
testutil.T
loadCmd *loadCmd loadCmd *loadCmd
storage storage.Storage storage storage.Storage
@ -727,13 +954,15 @@ type LazyLoaderOpts struct {
} }
// NewLazyLoader returns an initialized empty LazyLoader. // NewLazyLoader returns an initialized empty LazyLoader.
func NewLazyLoader(t testutil.T, input string, opts LazyLoaderOpts) (*LazyLoader, error) { func NewLazyLoader(input string, opts LazyLoaderOpts) (*LazyLoader, error) {
ll := &LazyLoader{ ll := &LazyLoader{
T: t,
opts: opts, opts: opts,
} }
err := ll.parse(input) err := ll.parse(input)
ll.clear() if err != nil {
return nil, err
}
err = ll.clear()
return ll, err return ll, err
} }
@ -761,15 +990,20 @@ func (ll *LazyLoader) parse(input string) error {
} }
// clear the current test storage of all inserted samples. // clear the current test storage of all inserted samples.
func (ll *LazyLoader) clear() { func (ll *LazyLoader) clear() error {
if ll.storage != nil { if ll.storage != nil {
err := ll.storage.Close() if err := ll.storage.Close(); err != nil {
require.NoError(ll.T, err, "Unexpected error while closing test storage.") return fmt.Errorf("closing test storage: %w", err)
}
} }
if ll.cancelCtx != nil { if ll.cancelCtx != nil {
ll.cancelCtx() ll.cancelCtx()
} }
ll.storage = teststorage.New(ll) var err error
ll.storage, err = teststorage.NewWithError()
if err != nil {
return err
}
opts := EngineOpts{ opts := EngineOpts{
Logger: nil, Logger: nil,
@ -783,6 +1017,7 @@ func (ll *LazyLoader) clear() {
ll.queryEngine = NewEngine(opts) ll.queryEngine = NewEngine(opts)
ll.context, ll.cancelCtx = context.WithCancel(context.Background()) ll.context, ll.cancelCtx = context.WithCancel(context.Background())
return nil
} }
// appendTill appends the defined time series to the storage till the given timestamp (in milliseconds). // appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
@ -836,8 +1071,7 @@ func (ll *LazyLoader) Storage() storage.Storage {
} }
// Close closes resources associated with the LazyLoader. // Close closes resources associated with the LazyLoader.
func (ll *LazyLoader) Close() { func (ll *LazyLoader) Close() error {
ll.cancelCtx() ll.cancelCtx()
err := ll.storage.Close() return ll.storage.Close()
require.NoError(ll.T, err, "Unexpected error while closing test storage.")
} }
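A short sketch of the adjusted LazyLoader lifecycle from outside the package, given the new signatures above (the load string is illustrative): the constructor no longer takes a testing handle, and Close now surfaces storage errors instead of asserting on them.

package example_test

import (
	"testing"

	"github.com/prometheus/prometheus/promql"
	"github.com/stretchr/testify/require"
)

func TestLazyLoaderExample(t *testing.T) {
	ll, err := promql.NewLazyLoader(`
load 10s
  metric_a 1+1x5
`, promql.LazyLoaderOpts{})
	require.NoError(t, err)
	defer func() { require.NoError(t, ll.Close()) }()
	_ = ll.Storage()
}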

View file

@ -110,7 +110,7 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
} }
for _, c := range cases { for _, c := range cases {
suite, err := NewLazyLoader(t, c.loadString, LazyLoaderOpts{}) suite, err := NewLazyLoader(c.loadString, LazyLoaderOpts{})
require.NoError(t, err) require.NoError(t, err)
defer suite.Close() defer suite.Close()
@ -156,3 +156,354 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
} }
} }
} }
func TestRunTest(t *testing.T) {
testData := `
load 5m
http_requests{job="api-server", instance="0", group="production"} 0+10x10
http_requests{job="api-server", instance="1", group="production"} 0+20x10
http_requests{job="api-server", instance="0", group="canary"} 0+30x10
http_requests{job="api-server", instance="1", group="canary"} 0+40x10
`
testCases := map[string]struct {
input string
expectedError string
}{
"instant query with expected float result": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
`,
},
"instant query with unexpected float result": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 80
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 80 for {group="canary"} but got 70`,
},
"instant query with expected histogram result": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
eval instant at 0 testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
`,
},
"instant query with unexpected histogram result": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}
eval instant at 0 testmetric
testmetric {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
`,
expectedError: `error in eval testmetric (line 5): expected {{schema:-1 count:1 sum:6 offset:1 buckets:[1]}} for {__name__="testmetric"} but got {{schema:-1 count:1 sum:4 offset:1 buckets:[1]}}`,
},
"instant query with float value returned when histogram expected": {
input: `
load 5m
testmetric 2
eval instant at 0 testmetric
testmetric {{}}
`,
expectedError: `error in eval testmetric (line 5): expected histogram {{}} for {__name__="testmetric"} but got float value 2`,
},
"instant query with histogram returned when float expected": {
input: `
load 5m
testmetric {{}}
eval instant at 0 testmetric
testmetric 2
`,
expectedError: `error in eval testmetric (line 5): expected float value 2.000000 for {__name__="testmetric"} but got histogram {{}}`,
},
"instant query, but result has an unexpected series": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result`,
},
"instant query, but result is missing a series": {
input: testData + `
eval instant at 5m sum by (group) (http_requests)
{group="production"} 30
{group="canary"} 70
{group="test"} 100
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} with 3: [100.000000] not found`,
},
"instant query expected to fail, and query fails": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail instant at 0m ceil({__name__=~'testmetric1|testmetric2'})
`,
},
"instant query expected to fail, but query succeeds": {
input: `eval_fail instant at 0s vector(0)`,
expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
},
"instant query with results expected to match provided order, and result is in expected order": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
http_requests{group="canary", instance="1", job="api-server"} 400
`,
},
"instant query with results expected to match provided order, but result is out of order": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="1", job="api-server"} 400
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 8): expected metric {__name__="http_requests", group="canary", instance="0", job="api-server"} with [300.000000] at position 4 but was at 3`,
},
"instant query with results expected to match provided order, but result has an unexpected series": {
input: testData + `
eval_ordered instant at 50m sort(http_requests)
http_requests{group="production", instance="0", job="api-server"} 100
http_requests{group="production", instance="1", job="api-server"} 200
http_requests{group="canary", instance="0", job="api-server"} 300
`,
expectedError: `error in eval sort(http_requests) (line 8): unexpected metric {__name__="http_requests", group="canary", instance="1", job="api-server"} in result`,
},
"instant query with invalid timestamp": {
input: `eval instant at abc123 vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid timestamp definition "abc123": not a valid duration string: "abc123"`,
},
"range query with expected result": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 70 140
`,
},
"range query with unexpected float value": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 80 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected float value at index 1 (t=300000) for {group="canary"} to be 80, but got 70 (result has 3 float points [0 @[0] 70 @[300000] 140 @[600000]] and 0 histogram points [])`,
},
"range query with expected histogram values": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
eval range from 0 to 10m step 5m testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
`,
},
"range query with unexpected histogram value": {
input: `
load 5m
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:5 count:1 buckets:[1] offset:1}} {{schema:-1 sum:6 count:1 buckets:[1] offset:1}}
eval range from 0 to 10m step 5m testmetric
testmetric {{schema:-1 sum:4 count:1 buckets:[1] offset:1}} {{schema:-1 sum:7 count:1 buckets:[1] offset:1}} {{schema:-1 sum:8 count:1 buckets:[1] offset:1}}
`,
expectedError: `error in eval testmetric (line 5): expected histogram value at index 1 (t=300000) for {__name__="testmetric"} to be {count:1, sum:7, (1,4]:1}, but got {count:1, sum:5, (1,4]:1} (result has 0 float points [] and 3 histogram points [{count:1, sum:4, (1,4]:1} @[0] {count:1, sum:5, (1,4]:1} @[300000] {count:1, sum:6, (1,4]:1} @[600000]])`,
},
"range query with too many points for query time range": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60 90
{group="canary"} 0 70 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 4 points for {group="production"}, but query time range cannot return this many points`,
},
"range query with missing point in result": {
input: `
load 5m
testmetric 5
eval range from 0 to 6m step 6m testmetric
testmetric 5 10
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 1 float point [5 @[0]] and 0 histogram points []`,
},
"range query with extra point in result": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30
{group="canary"} 0 70 140
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected 2 float points and 0 histogram points for {group="production"}, but got 3 float points [0 @[0] 30 @[300000] 60 @[600000]] and 0 histogram points []`,
},
"range query, but result has an unexpected series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): unexpected metric {group="canary"} in result`,
},
"range query, but result is missing a series": {
input: testData + `
eval range from 0 to 10m step 5m sum by (group) (http_requests)
{group="production"} 0 30 60
{group="canary"} 0 70 140
{group="test"} 0 100 200
`,
expectedError: `error in eval sum by (group) (http_requests) (line 8): expected metric {group="test"} not found`,
},
"range query expected to fail, and query fails": {
input: `
load 5m
testmetric1{src="a",dst="b"} 0
testmetric2{src="a",dst="b"} 1
eval_fail range from 0 to 10m step 5m ceil({__name__=~'testmetric1|testmetric2'})
`,
},
"range query expected to fail, but query succeeds": {
input: `eval_fail range from 0 to 10m step 5m vector(0)`,
expectedError: `expected error evaluating query "vector(0)" (line 1) but got none`,
},
"range query with from and to timestamps in wrong order": {
input: `eval range from 10m to 9m step 5m vector(0)`,
expectedError: `error in eval vector(0) (line 1): invalid test definition, end timestamp (9m) is before start timestamp (10m)`,
},
"range query with sparse output": {
input: `
load 6m
testmetric 1 _ 3
eval range from 0 to 18m step 6m testmetric
testmetric 1 _ 3
`,
},
"range query with float value returned when no value expected": {
input: `
load 6m
testmetric 1 2 3
eval range from 0 to 18m step 6m testmetric
testmetric 1 _ 3
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 3 float points [1 @[0] 2 @[360000] 3 @[720000]] and 0 histogram points []`,
},
"range query with float value returned when histogram expected": {
input: `
load 5m
testmetric 2 3
eval range from 0 to 5m step 5m testmetric
testmetric {{}} {{}}
`,
expectedError: `error in eval testmetric (line 5): expected 0 float points and 2 histogram points for {__name__="testmetric"}, but got 2 float points [2 @[0] 3 @[300000]] and 0 histogram points []`,
},
"range query with histogram returned when float expected": {
input: `
load 5m
testmetric {{}} {{}}
eval range from 0 to 5m step 5m testmetric
testmetric 2 3
`,
expectedError: `error in eval testmetric (line 5): expected 2 float points and 0 histogram points for {__name__="testmetric"}, but got 0 float points [] and 2 histogram points [{count:0, sum:0} @[0] {count:0, sum:0} @[300000]]`,
},
"range query with expected mixed results": {
input: `
load 6m
testmetric{group="a"} {{}} _ _
testmetric{group="b"} _ _ 3
eval range from 0 to 12m step 6m sum(testmetric)
{} {{}} _ 3
`,
},
"range query with mixed results and incorrect values": {
input: `
load 5m
testmetric 3 {{}}
eval range from 0 to 5m step 5m testmetric
testmetric {{}} 3
`,
expectedError: `error in eval testmetric (line 5): expected float value at index 0 for {__name__="testmetric"} to have timestamp 300000, but it had timestamp 0 (result has 1 float point [3 @[0]] and 1 histogram point [{count:0, sum:0} @[300000]])`,
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, newTestEngine())
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, testCase.expectedError)
}
})
}
}
func TestAssertMatrixSorted(t *testing.T) {
testCases := map[string]struct {
matrix Matrix
expectedError string
}{
"empty matrix": {
matrix: Matrix{},
},
"matrix with one series": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
},
},
"matrix with two series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
},
},
"matrix with two series, series in reverse order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_1")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 1 with labels {the_label="value_1"} sorts before series at index 0 with labels {the_label="value_2"}`,
},
"matrix with three series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
},
},
"matrix with three series, series not in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 2 with labels {the_label="value_2"} sorts before series at index 1 with labels {the_label="value_3"}`,
},
}
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := assertMatrixSorted(testCase.matrix)
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.EqualError(t, err, testCase.expectedError)
}
})
}
}

View file

@ -546,13 +546,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
} }
} }
if numOutOfOrder > 0 { if numOutOfOrder > 0 {
level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder) level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder)
} }
if numTooOld > 0 { if numTooOld > 0 {
level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "numDropped", numTooOld) level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld)
} }
if numDuplicates > 0 { if numDuplicates > 0 {
level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates) level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates)
} }
for metric, lset := range g.seriesInPreviousEval[i] { for metric, lset := range g.seriesInPreviousEval[i] {

View file

@ -43,7 +43,7 @@ type QueryFunc func(ctx context.Context, q string, t time.Time) (promql.Vector,
// EngineQueryFunc returns a new query function that executes instant queries against // EngineQueryFunc returns a new query function that executes instant queries against
// the given engine. // the given engine.
// It converts scalar into vector results. // It converts scalar into vector results.
func EngineQueryFunc(engine *promql.Engine, q storage.Queryable) QueryFunc { func EngineQueryFunc(engine promql.QueryEngine, q storage.Queryable) QueryFunc {
return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
q, err := engine.NewInstantQuery(ctx, q, nil, qs, t) q, err := engine.NewInstantQuery(ctx, q, nil, qs, t)
if err != nil { if err != nil {

View file

@ -956,13 +956,14 @@ func (c *scrapeCache) iterDone(flushCache bool) {
} }
} }
func (c *scrapeCache) get(met []byte) (*cacheEntry, bool) { func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) {
e, ok := c.series[string(met)] e, ok := c.series[string(met)]
if !ok { if !ok {
return nil, false return nil, false, false
} }
alreadyScraped := e.lastIter == c.iter
e.lastIter = c.iter e.lastIter = c.iter
return e, true return e, true, alreadyScraped
} }
func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) { func (c *scrapeCache) addRef(met []byte, ref storage.SeriesRef, lset labels.Labels, hash uint64) {
@ -1568,7 +1569,7 @@ loop:
if sl.cache.getDropped(met) { if sl.cache.getDropped(met) {
continue continue
} }
ce, ok := sl.cache.get(met) ce, ok, seriesAlreadyScraped := sl.cache.get(met)
var ( var (
ref storage.SeriesRef ref storage.SeriesRef
hash uint64 hash uint64
@ -1577,6 +1578,7 @@ loop:
if ok { if ok {
ref = ce.ref ref = ce.ref
lset = ce.lset lset = ce.lset
hash = ce.hash
// Update metadata only if it changed in the current iteration. // Update metadata only if it changed in the current iteration.
updateMetadata(lset, false) updateMetadata(lset, false)
@ -1613,25 +1615,36 @@ loop:
updateMetadata(lset, true) updateMetadata(lset, true)
} }
if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { if seriesAlreadyScraped {
ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) err = storage.ErrDuplicateSampleForTimestamp
if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. } else {
// CT is an experimental feature. For now, we don't need to fail the if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil {
// scrape on errors updating the created timestamp, log debug. ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs)
level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now.
// CT is an experimental feature. For now, we don't need to fail the
// scrape on errors updating the created timestamp, log debug.
level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err)
}
}
if isHistogram {
if h != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil)
} else {
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
}
} else {
ref, err = app.Append(ref, lset, t, val)
} }
} }
if isHistogram { if err == nil {
if h != nil { if (parsedTimestamp == nil || sl.trackTimestampsStaleness) && ce != nil {
ref, err = app.AppendHistogram(ref, lset, t, h, nil) sl.cache.trackStaleness(ce.hash, ce.lset)
} else {
ref, err = app.AppendHistogram(ref, lset, t, nil, fh)
} }
} else {
ref, err = app.Append(ref, lset, t, val)
} }
sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs)
if err != nil { if err != nil {
if !errors.Is(err, storage.ErrNotFound) { if !errors.Is(err, storage.ErrNotFound) {
level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err)
@ -1652,6 +1665,8 @@ loop:
// Increment added even if there's an error so we correctly report the // Increment added even if there's an error so we correctly report the
// number of samples remaining after relabeling. // number of samples remaining after relabeling.
// We still report duplicated samples here since this number should be the exact number
// of time series exposed on a scrape after relabelling.
added++ added++
exemplars = exemplars[:0] // Reset and reuse the exemplar slice. exemplars = exemplars[:0] // Reset and reuse the exemplar slice.
for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) {
@ -1746,12 +1761,9 @@ loop:
// Adds samples to the appender, checking the error, and then returns the # of samples added, // Adds samples to the appender, checking the error, and then returns the # of samples added,
// whether the caller should continue to process more samples, and any sample or bucket limit errors. // whether the caller should continue to process more samples, and any sample or bucket limit errors.
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) {
switch { switch {
case err == nil: case err == nil:
if (tp == nil || sl.trackTimestampsStaleness) && ce != nil {
sl.cache.trackStaleness(ce.hash, ce.lset)
}
return true, nil return true, nil
case errors.Is(err, storage.ErrNotFound): case errors.Is(err, storage.ErrNotFound):
return false, storage.ErrNotFound return false, storage.ErrNotFound
@ -1874,7 +1886,7 @@ func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err er
} }
func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error { func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v float64, b *labels.Builder) error {
ce, ok := sl.cache.get(s) ce, ok, _ := sl.cache.get(s)
var ref storage.SeriesRef var ref storage.SeriesRef
var lset labels.Labels var lset labels.Labels
if ok { if ok {

View file

@ -1069,6 +1069,7 @@ func makeTestMetrics(n int) []byte {
fmt.Fprintf(&sb, "# HELP metric_a help text\n") fmt.Fprintf(&sb, "# HELP metric_a help text\n")
fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100) fmt.Fprintf(&sb, "metric_a{foo=\"%d\",bar=\"%d\"} 1\n", i, i*100)
} }
fmt.Fprintf(&sb, "# EOF\n")
return sb.Bytes() return sb.Bytes()
} }
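The added # EOF line is the OpenMetrics terminator; a strict OpenMetrics parse of the generated payload fails without it. For reference, a minimal well-formed exposition ends like this (content illustrative):

# HELP metric_a help text
metric_a{foo="0",bar="0"} 1
# EOF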
@ -2636,6 +2637,9 @@ func TestScrapeLoopDiscardDuplicateLabels(t *testing.T) {
_, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{}) _, _, _, err := sl.append(slApp, []byte("test_metric{le=\"500\"} 1\ntest_metric{le=\"600\",le=\"700\"} 1\n"), "", time.Time{})
require.Error(t, err) require.Error(t, err)
require.NoError(t, slApp.Rollback()) require.NoError(t, slApp.Rollback())
// We need to cycle staleness cache maps after a manual rollback. Otherwise they will have old entries in them,
// which would cause ErrDuplicateSampleForTimestamp errors on the next append.
sl.cache.iterDone(true)
q, err := s.Querier(time.Time{}.UnixNano(), 0) q, err := s.Querier(time.Time{}.UnixNano(), 0)
require.NoError(t, err) require.NoError(t, err)
@ -2972,7 +2976,7 @@ func TestReuseCacheRace(t *testing.T) {
func TestCheckAddError(t *testing.T) { func TestCheckAddError(t *testing.T) {
var appErrs appendErrors var appErrs appendErrors
sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)} sl := scrapeLoop{l: log.NewNopLogger(), metrics: newTestScrapeMetrics(t)}
sl.checkAddError(nil, nil, nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs) sl.checkAddError(nil, storage.ErrOutOfOrderSample, nil, nil, &appErrs)
require.Equal(t, 1, appErrs.numOutOfOrder) require.Equal(t, 1, appErrs.numOutOfOrder)
} }
@ -3601,6 +3605,34 @@ func BenchmarkTargetScraperGzip(b *testing.B) {
} }
} }
// When a scrape contains multiple instances of the same time series, we should increment the
// prometheus_target_scrapes_sample_duplicate_timestamp_total metric.
func TestScrapeLoopSeriesAddedDuplicates(t *testing.T) {
ctx, sl := simpleTestScrapeLoop(t)
slApp := sl.appender(ctx)
total, added, seriesAdded, err := sl.append(slApp, []byte("test_metric 1\ntest_metric 2\ntest_metric 3\n"), "", time.Time{})
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 1, seriesAdded)
slApp = sl.appender(ctx)
total, added, seriesAdded, err = sl.append(slApp, []byte("test_metric 1\ntest_metric 1\ntest_metric 1\n"), "", time.Time{})
require.NoError(t, err)
require.NoError(t, slApp.Commit())
require.Equal(t, 3, total)
require.Equal(t, 3, added)
require.Equal(t, 0, seriesAdded)
metric := dto.Metric{}
err = sl.metrics.targetScrapeSampleDuplicate.Write(&metric)
require.NoError(t, err)
value := metric.GetCounter().GetValue()
require.Equal(t, 4.0, value)
}
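The expected counter value of 4.0 follows from the two appends above: within each scrape, the second and third occurrences of test_metric carry the same scrape timestamp as the first, so each scrape contributes two duplicate-timestamp samples, giving 2 + 2 = 4.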
// This tests running a full scrape loop and checking that the scrape option // This tests running a full scrape loop and checking that the scrape option
// `native_histogram_min_bucket_factor` is used correctly. // `native_histogram_min_bucket_factor` is used correctly.
func TestNativeHistogramMaxSchemaSet(t *testing.T) { func TestNativeHistogramMaxSchemaSet(t *testing.T) {

View file

@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- name: install Go - name: install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with: with:
@ -35,4 +35,4 @@ jobs:
- name: Lint - name: Lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
with: with:
version: v1.55.2 version: v1.56.2

View file

@ -37,7 +37,7 @@ if [ -z "${GITHUB_TOKEN}" ]; then
fi fi
# List of files that should be synced. # List of files that should be synced.
SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml" SYNC_FILES="CODE_OF_CONDUCT.md LICENSE Makefile.common SECURITY.md .yamllint scripts/golangci-lint.yml .github/workflows/scorecards.yml .github/workflows/container_description.yml"
# Go to the root of the repo # Go to the root of the repo
cd "$(git rev-parse --show-cdup)" || exit 1 cd "$(git rev-parse --show-cdup)" || exit 1
@ -99,6 +99,15 @@ check_go() {
curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/go.mod" curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/go.mod"
} }
check_docker() {
local org_repo
local default_branch
org_repo="$1"
default_branch="$2"
curl -sLf -o /dev/null "https://raw.githubusercontent.com/${org_repo}/${default_branch}/Dockerfile"
}
process_repo() { process_repo() {
local org_repo local org_repo
local default_branch local default_branch
@ -119,6 +128,10 @@ process_repo() {
echo "${org_repo} is not Go, skipping golangci-lint.yml." echo "${org_repo} is not Go, skipping golangci-lint.yml."
continue continue
fi fi
if [[ "${source_file}" == '.github/workflows/container_description.yml' ]] && ! check_docker "${org_repo}" "${default_branch}" ; then
echo "${org_repo} has no Dockerfile, skipping container_description.yml."
continue
fi
if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then if [[ "${source_file}" == 'LICENSE' ]] && ! check_license "${target_file}" ; then
echo "LICENSE in ${org_repo} is not apache, skipping." echo "LICENSE in ${org_repo} is not apache, skipping."
continue continue
@ -131,7 +144,7 @@ process_repo() {
if [[ -z "${target_file}" ]]; then if [[ -z "${target_file}" ]]; then
echo "${target_filename} doesn't exist in ${org_repo}" echo "${target_filename} doesn't exist in ${org_repo}"
case "${source_file}" in case "${source_file}" in
CODE_OF_CONDUCT.md | SECURITY.md) CODE_OF_CONDUCT.md | SECURITY.md | .github/workflows/container_description.yml)
echo "${source_file} missing in ${org_repo}, force updating." echo "${source_file} missing in ${org_repo}, force updating."
needs_update+=("${source_file}") needs_update+=("${source_file}")
;; ;;

View file

@ -357,12 +357,12 @@ func TestMergeChunkQuerierWithNoVerticalChunkSeriesMerger(t *testing.T) {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
var p ChunkQuerier var p ChunkQuerier
if tc.primaryChkQuerierSeries != nil { if tc.primaryChkQuerierSeries != nil {
p = &mockChunkQurier{toReturn: tc.primaryChkQuerierSeries} p = &mockChunkQuerier{toReturn: tc.primaryChkQuerierSeries}
} }
var qs []ChunkQuerier var qs []ChunkQuerier
for _, in := range tc.chkQuerierSeries { for _, in := range tc.chkQuerierSeries {
qs = append(qs, &mockChunkQurier{toReturn: in}) qs = append(qs, &mockChunkQuerier{toReturn: in})
} }
qs = append(qs, tc.extraQueriers...) qs = append(qs, tc.extraQueriers...)
@ -934,7 +934,7 @@ func (m *mockQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints,
return NewMockSeriesSet(cpy...) return NewMockSeriesSet(cpy...)
} }
type mockChunkQurier struct { type mockChunkQuerier struct {
LabelQuerier LabelQuerier
toReturn []ChunkSeries toReturn []ChunkSeries
@ -948,7 +948,7 @@ func (a chunkSeriesByLabel) Less(i, j int) bool {
return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 return labels.Compare(a[i].Labels(), a[j].Labels()) < 0
} }
func (m *mockChunkQurier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet { func (m *mockChunkQuerier) Select(_ context.Context, sortSeries bool, _ *SelectHints, _ ...*labels.Matcher) ChunkSeriesSet {
cpy := make([]ChunkSeries, len(m.toReturn)) cpy := make([]ChunkSeries, len(m.toReturn))
copy(cpy, m.toReturn) copy(cpy, m.toReturn)
if sortSeries { if sortSeries {

View file

@ -61,6 +61,12 @@ type OAuthConfig struct {
TenantID string `yaml:"tenant_id,omitempty"` TenantID string `yaml:"tenant_id,omitempty"`
} }
// SDKConfig is used to store azure SDK config values.
type SDKConfig struct {
// TenantID is the tenantId of the azure active directory application that is being used to authenticate.
TenantID string `yaml:"tenant_id,omitempty"`
}
// AzureADConfig is used to store the config values. // AzureADConfig is used to store the config values.
type AzureADConfig struct { //nolint:revive // exported. type AzureADConfig struct { //nolint:revive // exported.
// ManagedIdentity is the managed identity that is being used to authenticate. // ManagedIdentity is the managed identity that is being used to authenticate.
@ -69,6 +75,9 @@ type AzureADConfig struct { //nolint:revive // exported.
// OAuth is the oauth config that is being used to authenticate. // OAuth is the oauth config that is being used to authenticate.
OAuth *OAuthConfig `yaml:"oauth,omitempty"` OAuth *OAuthConfig `yaml:"oauth,omitempty"`
// SDK is the SDK config that is being used to authenticate.
SDK *SDKConfig `yaml:"sdk,omitempty"`
// Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina. // Cloud is the Azure cloud in which the service is running. Example: AzurePublic/AzureGovernment/AzureChina.
Cloud string `yaml:"cloud,omitempty"` Cloud string `yaml:"cloud,omitempty"`
} }
@ -102,14 +111,22 @@ func (c *AzureADConfig) Validate() error {
return fmt.Errorf("must provide a cloud in the Azure AD config") return fmt.Errorf("must provide a cloud in the Azure AD config")
} }
if c.ManagedIdentity == nil && c.OAuth == nil { if c.ManagedIdentity == nil && c.OAuth == nil && c.SDK == nil {
return fmt.Errorf("must provide an Azure Managed Identity or Azure OAuth in the Azure AD config") return fmt.Errorf("must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config")
} }
if c.ManagedIdentity != nil && c.OAuth != nil { if c.ManagedIdentity != nil && c.OAuth != nil {
return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config") return fmt.Errorf("cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config")
} }
if c.ManagedIdentity != nil && c.SDK != nil {
return fmt.Errorf("cannot provide both Azure Managed Identity and Azure SDK in the Azure AD config")
}
if c.OAuth != nil && c.SDK != nil {
return fmt.Errorf("cannot provide both Azure OAuth and Azure SDK in the Azure AD config")
}
if c.ManagedIdentity != nil { if c.ManagedIdentity != nil {
if c.ManagedIdentity.ClientID == "" { if c.ManagedIdentity.ClientID == "" {
return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config") return fmt.Errorf("must provide an Azure Managed Identity client_id in the Azure AD config")
@ -143,6 +160,17 @@ func (c *AzureADConfig) Validate() error {
} }
} }
if c.SDK != nil {
var err error
if c.SDK.TenantID != "" {
_, err = regexp.MatchString("^[0-9a-zA-Z-.]+$", c.SDK.TenantID)
if err != nil {
return fmt.Errorf("the provided Azure OAuth tenant_id is invalid")
}
}
}
return nil return nil
} }
@ -225,6 +253,16 @@ func newTokenCredential(cfg *AzureADConfig) (azcore.TokenCredential, error) {
} }
} }
if cfg.SDK != nil {
sdkConfig := &SDKConfig{
TenantID: cfg.SDK.TenantID,
}
cred, err = newSDKTokenCredential(clientOpts, sdkConfig)
if err != nil {
return nil, err
}
}
return cred, nil return cred, nil
} }
@ -241,6 +279,12 @@ func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAut
return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts) return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts)
} }
// newSDKTokenCredential returns new SDK token credential.
func newSDKTokenCredential(clientOpts *azcore.ClientOptions, sdkConfig *SDKConfig) (azcore.TokenCredential, error) {
opts := &azidentity.DefaultAzureCredentialOptions{ClientOptions: *clientOpts, TenantID: sdkConfig.TenantID}
return azidentity.NewDefaultAzureCredential(opts)
}
// newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of // newTokenProvider helps to fetch accessToken for different types of credential. This also takes care of
// refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests. // refreshing the accessToken before expiry. This accessToken is attached to the Authorization header while making requests.
func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) { func newTokenProvider(cfg *AzureADConfig, cred azcore.TokenCredential) (*tokenProvider, error) {
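For orientation, here is a minimal, hypothetical sketch (not part of this diff) of what such a token provider ultimately does with a credential like the one returned by newSDKTokenCredential: fetch a token via the Azure SDK and attach it as a bearer token. The scope and endpoint below are placeholder assumptions, not values taken from the code.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential is the chain the sdk config falls back to.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// The provider in the diff caches this token and refreshes it shortly
	// before ExpiresOn; here we fetch it once for illustration.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://monitor.azure.com/.default"}, // hypothetical scope
	})
	if err != nil {
		panic(err)
	}
	req, _ := http.NewRequest(http.MethodPost, "https://example.invalid/api/v1/write", nil) // hypothetical endpoint
	req.Header.Set("Authorization", "Bearer "+tok.Token)
	fmt.Println("token expires at", tok.ExpiresOn)
}
```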

View file

@ -39,7 +39,7 @@ const (
testTokenString = "testTokenString" testTokenString = "testTokenString"
) )
var testTokenExpiry = time.Now().Add(5 * time.Second) func testTokenExpiry() time.Time { return time.Now().Add(5 * time.Second) }
type AzureAdTestSuite struct { type AzureAdTestSuite struct {
suite.Suite suite.Suite
@ -94,7 +94,7 @@ func (ad *AzureAdTestSuite) TestAzureAdRoundTripper() {
testToken := &azcore.AccessToken{ testToken := &azcore.AccessToken{
Token: testTokenString, Token: testTokenString,
ExpiresOn: testTokenExpiry, ExpiresOn: testTokenExpiry(),
} }
ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil) ad.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil)
@ -145,7 +145,7 @@ func TestAzureAdConfig(t *testing.T) {
// Missing managedidentity or oauth field. // Missing managedidentity or oauth field.
{ {
filename: "testdata/azuread_bad_configmissing.yaml", filename: "testdata/azuread_bad_configmissing.yaml",
err: "must provide an Azure Managed Identity or Azure OAuth in the Azure AD config", err: "must provide an Azure Managed Identity, Azure OAuth or Azure SDK in the Azure AD config",
}, },
// Invalid managedidentity client id. // Invalid managedidentity client id.
{ {
@ -162,6 +162,11 @@ func TestAzureAdConfig(t *testing.T) {
filename: "testdata/azuread_bad_twoconfig.yaml", filename: "testdata/azuread_bad_twoconfig.yaml",
err: "cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config", err: "cannot provide both Azure Managed Identity and Azure OAuth in the Azure AD config",
}, },
// Invalid config when both sdk and oauth are provided.
{
filename: "testdata/azuread_bad_oauthsdkconfig.yaml",
err: "cannot provide both Azure OAuth and Azure SDK in the Azure AD config",
},
// Valid config with missing optional cloud field. // Valid config with missing optional cloud field.
{ {
filename: "testdata/azuread_good_cloudmissing.yaml", filename: "testdata/azuread_good_cloudmissing.yaml",
@ -174,6 +179,10 @@ func TestAzureAdConfig(t *testing.T) {
{ {
filename: "testdata/azuread_good_oauth.yaml", filename: "testdata/azuread_good_oauth.yaml",
}, },
// Valid SDK config.
{
filename: "testdata/azuread_good_sdk.yaml",
},
} }
for _, c := range cases { for _, c := range cases {
_, err := loadAzureAdConfig(c.filename) _, err := loadAzureAdConfig(c.filename)
@ -232,6 +241,16 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
}, },
err: "Cloud is not specified or is incorrect: ", err: "Cloud is not specified or is incorrect: ",
}, },
// Invalid tokenProvider for SDK.
{
cfg: &AzureADConfig{
Cloud: "PublicAzure",
SDK: &SDKConfig{
TenantID: dummyTenantID,
},
},
err: "Cloud is not specified or is incorrect: ",
},
// Valid tokenProvider for managedidentity. // Valid tokenProvider for managedidentity.
{ {
cfg: &AzureADConfig{ cfg: &AzureADConfig{
@ -252,6 +271,15 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
}, },
}, },
}, },
// Valid tokenProvider for SDK.
{
cfg: &AzureADConfig{
Cloud: "AzurePublic",
SDK: &SDKConfig{
TenantID: dummyTenantID,
},
},
},
} }
mockGetTokenCallCounter := 1 mockGetTokenCallCounter := 1
for _, c := range cases { for _, c := range cases {
@ -264,11 +292,11 @@ func (s *TokenProviderTestSuite) TestNewTokenProvider() {
} else { } else {
testToken := &azcore.AccessToken{ testToken := &azcore.AccessToken{
Token: testTokenString, Token: testTokenString,
ExpiresOn: testTokenExpiry, ExpiresOn: testTokenExpiry(),
} }
s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once(). s.mockCredential.On("GetToken", mock.Anything, mock.Anything).Return(*testToken, nil).Once().
On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil) On("GetToken", mock.Anything, mock.Anything).Return(getToken(), nil).Once()
actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential) actualTokenProvider, actualErr := newTokenProvider(c.cfg, s.mockCredential)

View file

@ -0,0 +1,7 @@
cloud: AzurePublic
oauth:
client_id: 00000000-0000-0000-0000-000000000000
client_secret: Cl1ent$ecret!
tenant_id: 00000000-a12b-3cd4-e56f-000000000000
sdk:
tenant_id: 00000000-a12b-3cd4-e56f-000000000000

View file

@ -0,0 +1,3 @@
cloud: AzurePublic
sdk:
tenant_id: 00000000-a12b-3cd4-e56f-000000000000

View file

@ -3,7 +3,6 @@
The files in the `prometheus/` and `prometheusremotewrite/` directories are copied from the OpenTelemetry Project[^1]. The files in the `prometheus/` and `prometheusremotewrite/` directories are copied from the OpenTelemetry Project[^1].
This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus` and a cyclic dependency will be created. This is just a temporary solution and the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`. This is done instead of adding a go.mod dependency because OpenTelemetry depends on `prometheus/prometheus` and a cyclic dependency will be created. This is just a temporary solution and the long-term solution is to move the required packages from OpenTelemetry into `prometheus/prometheus`.
We don't copy in `./prometheus` through this script because that package imports a collector specific featuregate package we don't want to import. The featuregate package is being removed now, and in the future we will copy this folder too.
To update the dependency is a multi-step process: To update the dependency is a multi-step process:
1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib) 1. Vendor the latest `prometheus/prometheus`@`main` into [`opentelemetry/opentelemetry-collector-contrib`](https://github.com/open-telemetry/opentelemetry-collector-contrib)
@ -20,4 +19,4 @@ This means if we depend on the upstream packages directly, we will never able to
When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK; please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then run the copy When we do want to make changes to the types in `prompb`, we might need to edit the files directly. That is OK; please let @gouthamve or @jesusvazquez know so they can take care of updating the upstream code (by vendoring in `prometheus/prometheus` upstream and resolving conflicts) and then run the copy
script again to keep things updated. script again to keep things updated.
[^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite [^1]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus and https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheusremotewrite

View file

@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import ( import (
"strings" "strings"

View file

@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import ( import (
"strings" "strings"

View file

@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" package prometheus // import "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
import "strings" import "strings"

View file

@ -23,5 +23,5 @@ case $(sed --help 2>&1) in
*) set sed -i '';; *) set sed -i '';;
esac esac
"$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go "$@" -e 's#github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus#github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus#g' ./prometheusremotewrite/*.go ./prometheus/*.go
"$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go "$@" -e '1s#^#// DO NOT EDIT. COPIED AS-IS. SEE ../README.md\n\n#g' ./prometheusremotewrite/*.go ./prometheus/*.go

View file

@ -202,34 +202,16 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
return err return err
} }
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs) chunks := h.getChunkSeriesSet(ctx, query, filteredMatchers)
if err != nil { if err := chunks.Err(); err != nil {
return err return err
} }
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
ws, err := StreamChunkedReadResponses( ws, err := StreamChunkedReadResponses(
NewChunkedWriter(w, f), NewChunkedWriter(w, f),
int64(i), int64(i),
// The streaming API has to provide the series sorted. // The streaming API has to provide the series sorted.
querier.Select(ctx, true, hints, filteredMatchers...), chunks,
sortedExternalLabels, sortedExternalLabels,
h.remoteReadMaxBytesInFrame, h.remoteReadMaxBytesInFrame,
h.marshalPool, h.marshalPool,
@ -254,6 +236,35 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re
} }
} }
// getChunkSeriesSet executes a query to retrieve a ChunkSeriesSet,
// encapsulating the operation in its own function to ensure timely release of
// the querier resources.
func (h *readHandler) getChunkSeriesSet(ctx context.Context, query *prompb.Query, filteredMatchers []*labels.Matcher) storage.ChunkSeriesSet {
querier, err := h.queryable.ChunkQuerier(query.StartTimestampMs, query.EndTimestampMs)
if err != nil {
return storage.ErrChunkSeriesSet(err)
}
defer func() {
if err := querier.Close(); err != nil {
level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error())
}
}()
var hints *storage.SelectHints
if query.Hints != nil {
hints = &storage.SelectHints{
Start: query.Hints.StartMs,
End: query.Hints.EndMs,
Step: query.Hints.StepMs,
Func: query.Hints.Func,
Grouping: query.Hints.Grouping,
Range: query.Hints.RangeMs,
By: query.Hints.By,
}
}
return querier.Select(ctx, true, hints, filteredMatchers...)
}
// filterExtLabelsFromMatchers changes equality matchers which match external labels // filterExtLabelsFromMatchers changes equality matchers which match external labels
// to matchers that look for an empty label, // to matchers that look for an empty label,
// as that label should not be present in the storage. // as that label should not be present in the storage.
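To make that rewrite concrete, a small sketch (not from the diff) using a hypothetical external label cluster="eu-1": an equality matcher targeting the external label is replaced by a matcher for the empty value, since local storage does not carry that label.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	external := labels.FromStrings("cluster", "eu-1") // hypothetical external labels
	m := labels.MustNewMatcher(labels.MatchEqual, "cluster", "eu-1")
	if external.Get(m.Name) == m.Value {
		// The matcher targets the external label exactly, so rewrite it to
		// look for an empty label in local storage instead.
		m = labels.MustNewMatcher(labels.MatchEqual, m.Name, "")
	}
	fmt.Println(m) // cluster=""
}
```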

View file

@ -209,6 +209,22 @@ func TestCorruptedChunk(t *testing.T) {
} }
} }
func sequenceFiles(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
func TestLabelValuesWithMatchers(t *testing.T) { func TestLabelValuesWithMatchers(t *testing.T) {
tmpdir := t.TempDir() tmpdir := t.TempDir()
ctx := context.Background() ctx := context.Background()

View file

@ -202,15 +202,6 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) {
}, nil }, nil
} }
// PopulatedChunk creates a chunk populated with samples every second starting at minTime.
func PopulatedChunk(numSamples int, minTime int64) (Meta, error) {
samples := make([]Sample, numSamples)
for i := 0; i < numSamples; i++ {
samples[i] = sample{t: minTime + int64(i*1000), f: 1.0}
}
return ChunkFromSamples(samples)
}
// ChunkMetasToSamples converts a slice of chunk meta data to a slice of samples. // ChunkMetasToSamples converts a slice of chunk meta data to a slice of samples.
// Used in tests to compare the content of chunks. // Used in tests to compare the content of chunks.
func ChunkMetasToSamples(chunks []Meta) (result []Sample) { func ChunkMetasToSamples(chunks []Meta) (result []Sample) {

View file

@ -15,7 +15,6 @@ package chunks
import ( import (
"bufio" "bufio"
"bytes"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -690,7 +689,6 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
sgmIndex, chkStart := ref.Unpack() sgmIndex, chkStart := ref.Unpack()
// We skip the series ref and the mint/maxt beforehand. // We skip the series ref and the mint/maxt beforehand.
chkStart += SeriesRefSize + (2 * MintMaxtSize) chkStart += SeriesRefSize + (2 * MintMaxtSize)
chkCRC32 := newCRC32()
// If it is the current open file, then the chunks can be in the buffer too. // If it is the current open file, then the chunks can be in the buffer too.
if sgmIndex == cdm.curFileSequence { if sgmIndex == cdm.curFileSequence {
@ -755,20 +753,13 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error
// Check the CRC. // Check the CRC.
sum := mmapFile.byteSlice.Range(chkDataEnd, chkDataEnd+CRCSize) sum := mmapFile.byteSlice.Range(chkDataEnd, chkDataEnd+CRCSize)
if _, err := chkCRC32.Write(mmapFile.byteSlice.Range(chkStart-(SeriesRefSize+2*MintMaxtSize), chkDataEnd)); err != nil { if err := checkCRC32(mmapFile.byteSlice.Range(chkStart-(SeriesRefSize+2*MintMaxtSize), chkDataEnd), sum); err != nil {
return nil, &CorruptionErr{ return nil, &CorruptionErr{
Dir: cdm.dir.Name(), Dir: cdm.dir.Name(),
FileIndex: sgmIndex, FileIndex: sgmIndex,
Err: err, Err: err,
} }
} }
if act := chkCRC32.Sum(nil); !bytes.Equal(act, sum) {
return nil, &CorruptionErr{
Dir: cdm.dir.Name(),
FileIndex: sgmIndex,
Err: fmt.Errorf("checksum mismatch expected:%x, actual:%x", sum, act),
}
}
// The chunk data itself. // The chunk data itself.
chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd) chkData := mmapFile.byteSlice.Range(chkDataEnd-int(chkDataLen), chkDataEnd)
@ -802,8 +793,6 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
cdm.fileMaxtSet = true cdm.fileMaxtSet = true
}() }()
chkCRC32 := newCRC32()
// Iterate files in ascending order. // Iterate files in ascending order.
segIDs := make([]int, 0, len(cdm.mmappedChunkFiles)) segIDs := make([]int, 0, len(cdm.mmappedChunkFiles))
for seg := range cdm.mmappedChunkFiles { for seg := range cdm.mmappedChunkFiles {
@ -838,7 +827,6 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
" - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID), " - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID),
} }
} }
chkCRC32.Reset()
chunkRef := newChunkDiskMapperRef(uint64(segID), uint64(idx)) chunkRef := newChunkDiskMapperRef(uint64(segID), uint64(idx))
startIdx := idx startIdx := idx
@ -877,14 +865,11 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu
// Check CRC. // Check CRC.
sum := mmapFile.byteSlice.Range(idx, idx+CRCSize) sum := mmapFile.byteSlice.Range(idx, idx+CRCSize)
if _, err := chkCRC32.Write(mmapFile.byteSlice.Range(startIdx, idx)); err != nil { if err := checkCRC32(mmapFile.byteSlice.Range(startIdx, idx), sum); err != nil {
return err
}
if act := chkCRC32.Sum(nil); !bytes.Equal(act, sum) {
return &CorruptionErr{ return &CorruptionErr{
Dir: cdm.dir.Name(), Dir: cdm.dir.Name(),
FileIndex: segID, FileIndex: segID,
Err: fmt.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), Err: err,
} }
} }
idx += CRCSize idx += CRCSize

View file

@ -24,7 +24,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"slices" "slices"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -530,9 +529,10 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue
if err := head.Init(maxBlockTime); err != nil { if err := head.Init(maxBlockTime); err != nil {
return nil, fmt.Errorf("read WAL: %w", err) return nil, fmt.Errorf("read WAL: %w", err)
} }
// Set the wal to nil to disable all wal operations. // Set the wal and the wbl to nil to disable related operations.
// This is mainly to avoid blocking when closing the head. // This is mainly to avoid blocking when closing the head.
head.wal = nil head.wal = nil
head.wbl = nil
} }
db.closers = append(db.closers, head) db.closers = append(db.closers, head)
@ -779,10 +779,6 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs
walDir := filepath.Join(dir, "wal") walDir := filepath.Join(dir, "wal")
wblDir := filepath.Join(dir, wlog.WblDirName) wblDir := filepath.Join(dir, wlog.WblDirName)
// Migrate old WAL if one exists.
if err := MigrateWAL(l, walDir); err != nil {
return nil, fmt.Errorf("migrate WAL: %w", err)
}
for _, tmpDir := range []string{walDir, dir} { for _, tmpDir := range []string{walDir, dir} {
// Remove tmp dirs. // Remove tmp dirs.
if err := removeBestEffortTmpDirs(l, tmpDir); err != nil { if err := removeBestEffortTmpDirs(l, tmpDir); err != nil {
@ -1613,9 +1609,9 @@ func BeyondTimeRetention(db *DB, blocks []*Block) (deletable map[ulid.ULID]struc
deletable = make(map[ulid.ULID]struct{}) deletable = make(map[ulid.ULID]struct{})
for i, block := range blocks { for i, block := range blocks {
// The difference between the first block and this block is larger than // The difference between the first block and this block is greater than or equal to
// the retention period so any blocks after that are added as deletable. // the retention period so any blocks after that are added as deletable.
if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > db.opts.RetentionDuration { if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime >= db.opts.RetentionDuration {
for _, b := range blocks[i:] { for _, b := range blocks[i:] {
deletable[b.meta.ULID] = struct{}{} deletable[b.meta.ULID] = struct{}{}
} }
@ -2213,39 +2209,6 @@ func blockDirs(dir string) ([]string, error) {
return dirs, nil return dirs, nil
} }
func sequenceFiles(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var res []string
for _, fi := range files {
if _, err := strconv.ParseUint(fi.Name(), 10, 64); err != nil {
continue
}
res = append(res, filepath.Join(dir, fi.Name()))
}
return res, nil
}
func nextSequenceFile(dir string) (string, int, error) {
files, err := os.ReadDir(dir)
if err != nil {
return "", 0, err
}
i := uint64(0)
for _, f := range files {
j, err := strconv.ParseUint(f.Name(), 10, 64)
if err != nil {
continue
}
i = j
}
return filepath.Join(dir, fmt.Sprintf("%0.6d", i+1)), int(i + 1), nil
}
func exponential(d, min, max time.Duration) time.Duration { func exponential(d, min, max time.Duration) time.Duration {
d *= 2 d *= 2
if d < min { if d < min {

View file

@ -1463,34 +1463,66 @@ func (*mockCompactorFailing) CompactOOO(string, *OOOCompactionHead) (result []ul
} }
func TestTimeRetention(t *testing.T) { func TestTimeRetention(t *testing.T) {
db := openTestDB(t, nil, []int64{1000}) testCases := []struct {
defer func() { name string
require.NoError(t, db.Close()) blocks []*BlockMeta
}() expBlocks []*BlockMeta
retentionDuration int64
blocks := []*BlockMeta{ }{
{MinTime: 500, MaxTime: 900}, // Oldest block {
{MinTime: 1000, MaxTime: 1500}, name: "Block max time delta greater than retention duration",
{MinTime: 1500, MaxTime: 2000}, // Newest Block blocks: []*BlockMeta{
{MinTime: 500, MaxTime: 900}, // Oldest block, beyond retention
{MinTime: 1000, MaxTime: 1500},
{MinTime: 1500, MaxTime: 2000}, // Newest block
},
expBlocks: []*BlockMeta{
{MinTime: 1000, MaxTime: 1500},
{MinTime: 1500, MaxTime: 2000},
},
retentionDuration: 1000,
},
{
name: "Block max time delta equal to retention duration",
blocks: []*BlockMeta{
{MinTime: 500, MaxTime: 900}, // Oldest block
{MinTime: 1000, MaxTime: 1500}, // Coinciding exactly with the retention duration.
{MinTime: 1500, MaxTime: 2000}, // Newest block
},
expBlocks: []*BlockMeta{
{MinTime: 1500, MaxTime: 2000},
},
retentionDuration: 500,
},
} }
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
db := openTestDB(t, nil, []int64{1000})
defer func() {
require.NoError(t, db.Close())
}()
for _, m := range blocks { for _, m := range tc.blocks {
createBlock(t, db.Dir(), genSeries(10, 10, m.MinTime, m.MaxTime)) createBlock(t, db.Dir(), genSeries(10, 10, m.MinTime, m.MaxTime))
}
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
require.Len(t, db.Blocks(), len(tc.blocks)) // Ensure all blocks are registered.
db.opts.RetentionDuration = tc.retentionDuration
// Reloading should truncate blocks whose max time is at least the retention duration older than the first block's max time.
require.NoError(t, db.reloadBlocks())
actBlocks := db.Blocks()
require.Equal(t, 1, int(prom_testutil.ToFloat64(db.metrics.timeRetentionCount)), "metric retention count mismatch")
require.Len(t, actBlocks, len(tc.expBlocks))
for i, eb := range tc.expBlocks {
require.Equal(t, eb.MinTime, actBlocks[i].meta.MinTime)
require.Equal(t, eb.MaxTime, actBlocks[i].meta.MaxTime)
}
})
} }
require.NoError(t, db.reloadBlocks()) // Reload the db to register the new blocks.
require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
db.opts.RetentionDuration = blocks[2].MaxTime - blocks[1].MinTime
require.NoError(t, db.reloadBlocks())
expBlocks := blocks[1:]
actBlocks := db.Blocks()
require.Equal(t, 1, int(prom_testutil.ToFloat64(db.metrics.timeRetentionCount)), "metric retention count mismatch")
require.Equal(t, len(expBlocks), len(actBlocks))
require.Equal(t, expBlocks[0].MaxTime, actBlocks[0].meta.MaxTime)
require.Equal(t, expBlocks[len(expBlocks)-1].MaxTime, actBlocks[len(actBlocks)-1].meta.MaxTime)
} }
func TestRetentionDurationMetric(t *testing.T) { func TestRetentionDurationMetric(t *testing.T) {
@ -1966,6 +1998,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
// Should be set to init values if no WAL or blocks exist so far. // Should be set to init values if no WAL or blocks exist so far.
require.Equal(t, int64(math.MaxInt64), db.head.MinTime()) require.Equal(t, int64(math.MaxInt64), db.head.MinTime())
require.Equal(t, int64(math.MinInt64), db.head.MaxTime()) require.Equal(t, int64(math.MinInt64), db.head.MaxTime())
require.False(t, db.head.initialized())
// First added sample initializes the writable range. // First added sample initializes the writable range.
ctx := context.Background() ctx := context.Background()
@ -1975,6 +2008,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(1000), db.head.MinTime()) require.Equal(t, int64(1000), db.head.MinTime())
require.Equal(t, int64(1000), db.head.MaxTime()) require.Equal(t, int64(1000), db.head.MaxTime())
require.True(t, db.head.initialized())
}) })
t.Run("wal-only", func(t *testing.T) { t.Run("wal-only", func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
@ -2003,6 +2037,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(5000), db.head.MinTime()) require.Equal(t, int64(5000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime()) require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
}) })
t.Run("existing-block", func(t *testing.T) { t.Run("existing-block", func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
@ -2015,6 +2050,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(2000), db.head.MinTime()) require.Equal(t, int64(2000), db.head.MinTime())
require.Equal(t, int64(2000), db.head.MaxTime()) require.Equal(t, int64(2000), db.head.MaxTime())
require.True(t, db.head.initialized())
}) })
t.Run("existing-block-and-wal", func(t *testing.T) { t.Run("existing-block-and-wal", func(t *testing.T) {
dir := t.TempDir() dir := t.TempDir()
@ -2047,6 +2083,7 @@ func TestInitializeHeadTimestamp(t *testing.T) {
require.Equal(t, int64(6000), db.head.MinTime()) require.Equal(t, int64(6000), db.head.MinTime())
require.Equal(t, int64(15000), db.head.MaxTime()) require.Equal(t, int64(15000), db.head.MaxTime())
require.True(t, db.head.initialized())
// Check that old series has been GCed. // Check that old series has been GCed.
require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series)) require.Equal(t, 1.0, prom_testutil.ToFloat64(db.head.metrics.series))
}) })
@ -3598,7 +3635,7 @@ func testChunkQuerierShouldNotPanicIfHeadChunkIsTruncatedWhileReadingQueriedChun
// just to iterate through the bytes slice. We don't really care the reason why // just to iterate through the bytes slice. We don't really care the reason why
// we read this data, we just need to read it to make sure the memory address // we read this data, we just need to read it to make sure the memory address
// of the []byte is still valid. // of the []byte is still valid.
chkCRC32 := newCRC32() chkCRC32 := crc32.New(crc32.MakeTable(crc32.Castagnoli))
for _, chunk := range chunks { for _, chunk := range chunks {
chkCRC32.Reset() chkCRC32.Reset()
_, err := chkCRC32.Write(chunk.Bytes()) _, err := chkCRC32.Write(chunk.Bytes())

View file

@ -620,6 +620,7 @@ func (h *Head) Init(minValidTime int64) error {
refSeries := make(map[chunks.HeadSeriesRef]*memSeries) refSeries := make(map[chunks.HeadSeriesRef]*memSeries)
snapshotLoaded := false snapshotLoaded := false
var chunkSnapshotLoadDuration time.Duration
if h.opts.EnableMemorySnapshotOnShutdown { if h.opts.EnableMemorySnapshotOnShutdown {
level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot")
// If there are any WAL files, there should be at least one WAL file with an index that is current or newer // If there are any WAL files, there should be at least one WAL file with an index that is current or newer
@ -650,7 +651,8 @@ func (h *Head) Init(minValidTime int64) error {
snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot() snapIdx, snapOffset, refSeries, err = h.loadChunkSnapshot()
if err == nil { if err == nil {
snapshotLoaded = true snapshotLoaded = true
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", time.Since(start).String()) chunkSnapshotLoadDuration = time.Since(start)
level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String())
} }
if err != nil { if err != nil {
snapIdx, snapOffset = -1, 0 snapIdx, snapOffset = -1, 0
@ -672,6 +674,8 @@ func (h *Head) Init(minValidTime int64) error {
oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk
lastMmapRef chunks.ChunkDiskMapperRef lastMmapRef chunks.ChunkDiskMapperRef
err error err error
mmapChunkReplayDuration time.Duration
) )
if snapshotLoaded || h.wal != nil { if snapshotLoaded || h.wal != nil {
// If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded // If snapshot was not loaded and if there is no WAL, then m-map chunks will be discarded
@ -695,7 +699,8 @@ func (h *Head) Init(minValidTime int64) error {
return err return err
} }
} }
level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", time.Since(mmapChunkReplayStart).String()) mmapChunkReplayDuration = time.Since(mmapChunkReplayStart)
level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String())
} }
if h.wal == nil { if h.wal == nil {
@ -817,6 +822,8 @@ func (h *Head) Init(minValidTime int64) error {
"checkpoint_replay_duration", checkpointReplayDuration.String(), "checkpoint_replay_duration", checkpointReplayDuration.String(),
"wal_replay_duration", walReplayDuration.String(), "wal_replay_duration", walReplayDuration.String(),
"wbl_replay_duration", wblReplayDuration.String(), "wbl_replay_duration", wblReplayDuration.String(),
"chunk_snapshot_load_duration", chunkSnapshotLoadDuration.String(),
"mmap_chunk_replay_duration", mmapChunkReplayDuration.String(),
"total_replay_duration", totalReplayDuration.String(), "total_replay_duration", totalReplayDuration.String(),
) )
@ -1074,11 +1081,11 @@ func (h *Head) SetMinValidTime(minValidTime int64) {
// Truncate removes old data before mint from the head and WAL. // Truncate removes old data before mint from the head and WAL.
func (h *Head) Truncate(mint int64) (err error) { func (h *Head) Truncate(mint int64) (err error) {
initialize := h.MinTime() == math.MaxInt64 initialized := h.initialized()
if err := h.truncateMemory(mint); err != nil { if err := h.truncateMemory(mint); err != nil {
return err return err
} }
if initialize { if !initialized {
return nil return nil
} }
return h.truncateWAL(mint) return h.truncateWAL(mint)
@ -1100,9 +1107,9 @@ func (h *Head) truncateMemory(mint int64) (err error) {
} }
}() }()
initialize := h.MinTime() == math.MaxInt64 initialized := h.initialized()
if h.MinTime() >= mint && !initialize { if h.MinTime() >= mint && initialized {
return nil return nil
} }
@ -1113,7 +1120,7 @@ func (h *Head) truncateMemory(mint int64) (err error) {
defer h.memTruncationInProcess.Store(false) defer h.memTruncationInProcess.Store(false)
// We wait for pending queries to end that overlap with this truncation. // We wait for pending queries to end that overlap with this truncation.
if !initialize { if initialized {
h.WaitForPendingReadersInTimeRange(h.MinTime(), mint) h.WaitForPendingReadersInTimeRange(h.MinTime(), mint)
} }
@ -1127,7 +1134,7 @@ func (h *Head) truncateMemory(mint int64) (err error) {
// This was an initial call to Truncate after loading blocks on startup. // This was an initial call to Truncate after loading blocks on startup.
// We haven't read back the WAL yet, so do not attempt to truncate it. // We haven't read back the WAL yet, so do not attempt to truncate it.
if initialize { if !initialized {
return nil return nil
} }
@ -1615,10 +1622,19 @@ func (h *Head) MaxOOOTime() int64 {
return h.maxOOOTime.Load() return h.maxOOOTime.Load()
} }
// initialized returns true if the head has a MinTime set, false otherwise.
func (h *Head) initialized() bool {
return h.MinTime() != math.MaxInt64
}
// compactable returns whether the head has a compactable range. // compactable returns whether the head has a compactable range.
// The head has a compactable range when the head time range is 1.5 times the chunk range. // The head has a compactable range when the head time range is 1.5 times the chunk range.
// The 0.5 acts as a buffer of the appendable window. // The 0.5 acts as a buffer of the appendable window.
func (h *Head) compactable() bool { func (h *Head) compactable() bool {
if !h.initialized() {
return false
}
return h.MaxTime()-h.MinTime() > h.chunkRange.Load()/2*3 return h.MaxTime()-h.MinTime() > h.chunkRange.Load()/2*3
} }
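A small worked example of the 1.5x rule above (a sketch, not part of the diff), assuming the default 2h head chunk range: the head only becomes compactable once it spans more than 3h of data.

```go
package main

import "fmt"

func main() {
	const chunkRangeMs = int64(2 * 60 * 60 * 1000) // assumed default head chunk range: 2h
	threshold := chunkRangeMs / 2 * 3              // 1.5x the chunk range = 3h

	headSpanMs := int64(3*60*60*1000 + 1) // head covers just over 3h of data
	fmt.Println(headSpanMs > threshold)   // true -> compactable
}
```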

View file

@ -138,7 +138,7 @@ func (h *Head) Appender(_ context.Context) storage.Appender {
// The head cache might not have a starting point yet. The init appender // The head cache might not have a starting point yet. The init appender
// picks up the first appended timestamp as the base. // picks up the first appended timestamp as the base.
if h.MinTime() == math.MaxInt64 { if !h.initialized() {
return &initAppender{ return &initAppender{
head: h, head: h,
} }
@ -191,7 +191,7 @@ func (h *Head) appendableMinValidTime() int64 {
// AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head. // AppendableMinValidTime returns the minimum valid time for samples to be appended to the Head.
// Returns false if Head hasn't been initialized yet and the minimum time isn't known yet. // Returns false if Head hasn't been initialized yet and the minimum time isn't known yet.
func (h *Head) AppendableMinValidTime() (int64, bool) { func (h *Head) AppendableMinValidTime() (int64, bool) {
if h.MinTime() == math.MaxInt64 { if !h.initialized() {
return 0, false return 0, false
} }

View file

@ -5819,3 +5819,16 @@ func TestHeadAppender_AppendCTZeroSample(t *testing.T) {
require.Equal(t, chunkenc.ValNone, it.Next()) require.Equal(t, chunkenc.ValNone, it.Next())
} }
} }
func TestHeadCompactableDoesNotCompactEmptyHead(t *testing.T) {
// Use a chunk range of 1 here so that if we attempted to determine if the head
// was compactable using default values for min and max times, `Head.compactable()`
// would return true which is incorrect. This test verifies that we short-circuit
// the check when the head has not yet had any samples added.
head, _ := newTestHead(t, 1, wlog.CompressionNone, false)
defer func() {
require.NoError(t, head.Close())
}()
require.False(t, head.compactable())
}

View file

@ -1829,7 +1829,7 @@ func NewStringListIter(s []string) StringIter {
return &stringListIter{l: s} return &stringListIter{l: s}
} }
// symbolsIter implements StringIter. // stringListIter implements StringIter.
type stringListIter struct { type stringListIter struct {
l []string l []string
cur string cur string

View file

@ -19,8 +19,6 @@ import (
"fmt" "fmt"
"math" "math"
"slices" "slices"
"strings"
"unicode/utf8"
"github.com/oklog/ulid" "github.com/oklog/ulid"
@ -35,20 +33,6 @@ import (
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
) )
// Bitmap used by func isRegexMetaCharacter to check whether a character needs to be escaped.
var regexMetaCharacterBytes [16]byte
// isRegexMetaCharacter reports whether byte b needs to be escaped.
func isRegexMetaCharacter(b byte) bool {
return b < utf8.RuneSelf && regexMetaCharacterBytes[b%16]&(1<<(b/16)) != 0
}
func init() {
for _, b := range []byte(`.+*?()|[]{}^$`) {
regexMetaCharacterBytes[b%16] |= 1 << (b / 16)
}
}
type blockBaseQuerier struct { type blockBaseQuerier struct {
blockID ulid.ULID blockID ulid.ULID
index IndexReader index IndexReader
@ -195,55 +179,6 @@ func (q *blockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *
return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming) return NewBlockChunkSeriesSet(q.blockID, q.index, q.chunks, q.tombstones, p, mint, maxt, disableTrimming)
} }
func findSetMatches(pattern string) []string {
// Return empty matches if the wrapper from Prometheus is missing.
if len(pattern) < 6 || pattern[:4] != "^(?:" || pattern[len(pattern)-2:] != ")$" {
return nil
}
escaped := false
sets := []*strings.Builder{{}}
init := 4
end := len(pattern) - 2
// If the regex is wrapped in a group we can remove the first and last parentheses
if pattern[init] == '(' && pattern[end-1] == ')' {
init++
end--
}
for i := init; i < end; i++ {
if escaped {
switch {
case isRegexMetaCharacter(pattern[i]):
sets[len(sets)-1].WriteByte(pattern[i])
case pattern[i] == '\\':
sets[len(sets)-1].WriteByte('\\')
default:
return nil
}
escaped = false
} else {
switch {
case isRegexMetaCharacter(pattern[i]):
if pattern[i] == '|' {
sets = append(sets, &strings.Builder{})
} else {
return nil
}
case pattern[i] == '\\':
escaped = true
default:
sets[len(sets)-1].WriteByte(pattern[i])
}
}
}
matches := make([]string, 0, len(sets))
for _, s := range sets {
if s.Len() > 0 {
matches = append(matches, s.String())
}
}
return matches
}
// PostingsForMatchers assembles a single postings iterator against the index reader // PostingsForMatchers assembles a single postings iterator against the index reader
// based on the given matchers. The resulting postings are not ordered by series. // based on the given matchers. The resulting postings are not ordered by series.
func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) { func PostingsForMatchers(ctx context.Context, ix IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
@ -385,7 +320,7 @@ func postingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Matcher)
// Fast-path for set matching. // Fast-path for set matching.
if m.Type == labels.MatchRegexp { if m.Type == labels.MatchRegexp {
setMatches := findSetMatches(m.GetRegexString()) setMatches := m.SetMatches()
if len(setMatches) > 0 { if len(setMatches) > 0 {
return ix.Postings(ctx, m.Name, setMatches...) return ix.Postings(ctx, m.Name, setMatches...)
} }
@ -416,7 +351,7 @@ func inversePostingsForMatcher(ctx context.Context, ix IndexReader, m *labels.Ma
// Inverse of a MatchNotRegexp is MatchRegexp (double negation). // Inverse of a MatchNotRegexp is MatchRegexp (double negation).
// Fast-path for set matching. // Fast-path for set matching.
if m.Type == labels.MatchNotRegexp { if m.Type == labels.MatchNotRegexp {
setMatches := findSetMatches(m.GetRegexString()) setMatches := m.SetMatches()
if len(setMatches) > 0 { if len(setMatches) > 0 {
return ix.Postings(ctx, m.Name, setMatches...) return ix.Postings(ctx, m.Name, setMatches...)
} }
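For illustration, a short sketch (not from the diff) of the set-matching fast path: when a regexp matcher's pattern is a plain alternation of literals, SetMatches() exposes those literals so postings can be fetched for them directly. The exact output depends on the matcher's internal optimizations, so treat it as indicative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	m := labels.MustNewMatcher(labels.MatchRegexp, "job", "api|web")
	// For a literal alternation this typically yields ["api", "web"];
	// for patterns that cannot be reduced to a set it returns nil.
	fmt.Println(m.SetMatches())
}
```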

View file

@ -2658,54 +2658,6 @@ func BenchmarkSetMatcher(b *testing.B) {
} }
} }
// Refer to https://github.com/prometheus/prometheus/issues/2651.
func TestFindSetMatches(t *testing.T) {
cases := []struct {
pattern string
exp []string
}{
// Single value, coming from a `bar=~"foo"` selector.
{
pattern: "^(?:foo)$",
exp: []string{
"foo",
},
},
// Simple sets.
{
pattern: "^(?:foo|bar|baz)$",
exp: []string{
"foo",
"bar",
"baz",
},
},
// Simple sets containing escaped characters.
{
pattern: "^(?:fo\\.o|bar\\?|\\^baz)$",
exp: []string{
"fo.o",
"bar?",
"^baz",
},
},
// Simple sets containing special characters without escaping.
{
pattern: "^(?:fo.o|bar?|^baz)$",
exp: nil,
},
// Missing wrapper.
{
pattern: "foo|bar|baz",
exp: nil,
},
}
for _, c := range cases {
require.Equal(t, c.exp, findSetMatches(c.pattern), "Evaluating %s, unexpected result.", c.pattern)
}
}
func TestPostingsForMatchers(t *testing.T) { func TestPostingsForMatchers(t *testing.T) {
ctx := context.Background() ctx := context.Background()
@ -3310,7 +3262,7 @@ func TestPostingsForMatcher(t *testing.T) {
{ {
// Test case for double quoted regex matcher // Test case for double quoted regex matcher
matcher: labels.MustNewMatcher(labels.MatchRegexp, "test", "^(?:a|b)$"), matcher: labels.MustNewMatcher(labels.MatchRegexp, "test", "^(?:a|b)$"),
hasError: true, hasError: false,
}, },
} }

File diff suppressed because it is too large

View file

@ -1,553 +0,0 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
package tsdb
import (
"encoding/binary"
"io"
"math/rand"
"os"
"path"
"path/filepath"
"testing"
"time"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/record"
"github.com/prometheus/prometheus/tsdb/tombstones"
"github.com/prometheus/prometheus/tsdb/wlog"
"github.com/prometheus/prometheus/util/testutil"
)
func TestSegmentWAL_cut(t *testing.T) {
tmpdir := t.TempDir()
// This calls cut() implicitly the first time without a previous tail.
w, err := OpenSegmentWAL(tmpdir, nil, 0, nil)
require.NoError(t, err)
require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
require.NoError(t, w.cut())
// Cutting creates a new file.
require.Len(t, w.files, 2)
require.NoError(t, w.write(WALEntrySeries, 1, []byte("Hello World!!")))
require.NoError(t, w.Close())
for _, of := range w.files {
f, err := os.Open(of.Name())
require.NoError(t, err)
// Verify header data.
metab := make([]byte, 8)
_, err = f.Read(metab)
require.NoError(t, err)
require.Equal(t, WALMagic, binary.BigEndian.Uint32(metab[:4]))
require.Equal(t, WALFormatDefault, metab[4])
// We cannot actually check for correct pre-allocation as it is
// optional per filesystem and handled transparently.
et, flag, b, err := newWALReader(nil, nil).entry(f)
require.NoError(t, err)
require.Equal(t, WALEntrySeries, et)
require.Equal(t, byte(walSeriesSimple), flag)
require.Equal(t, []byte("Hello World!!"), b)
}
}
func TestSegmentWAL_Truncate(t *testing.T) {
const (
numMetrics = 20000
batch = 100
)
series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
require.NoError(t, err)
dir := t.TempDir()
w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w)
w.segmentSize = 10000
for i := 0; i < numMetrics; i += batch {
var rs []record.RefSeries
for j, s := range series[i : i+batch] {
rs = append(rs, record.RefSeries{Labels: s, Ref: chunks.HeadSeriesRef(i+j) + 1})
}
err := w.LogSeries(rs)
require.NoError(t, err)
}
// We mark the 2nd half of the files with a min timestamp that should discard
// them from the selection of compactable files.
for i, f := range w.files[len(w.files)/2:] {
f.maxTime = int64(1000 + i)
}
// All series in those files must be preserved regardless of the provided postings list.
boundarySeries := w.files[len(w.files)/2].minSeries
// We truncate while keeping every 2nd series.
keep := map[chunks.HeadSeriesRef]struct{}{}
for i := 1; i <= numMetrics; i += 2 {
keep[chunks.HeadSeriesRef(i)] = struct{}{}
}
keepf := func(id chunks.HeadSeriesRef) bool {
_, ok := keep[id]
return ok
}
err = w.Truncate(1000, keepf)
require.NoError(t, err)
var expected []record.RefSeries
for i := 1; i <= numMetrics; i++ {
if i%2 == 1 || chunks.HeadSeriesRef(i) >= boundarySeries {
expected = append(expected, record.RefSeries{Ref: chunks.HeadSeriesRef(i), Labels: series[i-1]})
}
}
// Call Truncate once again to see whether we can read the written file without
// creating a new WAL.
err = w.Truncate(1000, keepf)
require.NoError(t, err)
require.NoError(t, w.Close())
// The same again with a new WAL.
w, err = OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w)
var readSeries []record.RefSeries
r := w.Reader()
require.NoError(t, r.Read(func(s []record.RefSeries) {
readSeries = append(readSeries, s...)
}, nil, nil))
testutil.RequireEqual(t, expected, readSeries)
}
// Symmetrical test of reading and writing to the WAL via its main interface.
func TestSegmentWAL_Log_Restore(t *testing.T) {
const (
numMetrics = 50
iterations = 5
stepSize = 5
)
// Generate testing data. It does not make semantic sense but
// is sufficient for the purpose of this test.
series, err := labels.ReadLabels(filepath.Join("testdata", "20kseries.json"), numMetrics)
require.NoError(t, err)
dir := t.TempDir()
var (
recordedSeries [][]record.RefSeries
recordedSamples [][]record.RefSample
recordedDeletes [][]tombstones.Stone
)
var totalSamples int
// Open WAL a bunch of times, validate all previous data can be read,
// write more data to it, close it.
for k := 0; k < numMetrics; k += numMetrics / iterations {
w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
// Set smaller segment size so we can actually write several files.
w.segmentSize = 1000 * 1000
r := w.Reader()
var (
resultSeries [][]record.RefSeries
resultSamples [][]record.RefSample
resultDeletes [][]tombstones.Stone
)
serf := func(series []record.RefSeries) {
if len(series) > 0 {
clsets := make([]record.RefSeries, len(series))
copy(clsets, series)
resultSeries = append(resultSeries, clsets)
}
}
smplf := func(smpls []record.RefSample) {
if len(smpls) > 0 {
csmpls := make([]record.RefSample, len(smpls))
copy(csmpls, smpls)
resultSamples = append(resultSamples, csmpls)
}
}
delf := func(stones []tombstones.Stone) {
if len(stones) > 0 {
cst := make([]tombstones.Stone, len(stones))
copy(cst, stones)
resultDeletes = append(resultDeletes, cst)
}
}
require.NoError(t, r.Read(serf, smplf, delf))
testutil.RequireEqual(t, recordedSamples, resultSamples)
testutil.RequireEqual(t, recordedSeries, resultSeries)
testutil.RequireEqual(t, recordedDeletes, resultDeletes)
series := series[k : k+(numMetrics/iterations)]
// Insert in batches and generate different amounts of samples for each.
for i := 0; i < len(series); i += stepSize {
var samples []record.RefSample
var stones []tombstones.Stone
for j := 0; j < i*10; j++ {
samples = append(samples, record.RefSample{
Ref: chunks.HeadSeriesRef(j % 10000),
T: int64(j * 2),
V: rand.Float64(),
})
}
for j := 0; j < i*20; j++ {
ts := rand.Int63()
stones = append(stones, tombstones.Stone{Ref: storage.SeriesRef(rand.Uint64()), Intervals: tombstones.Intervals{{Mint: ts, Maxt: ts + rand.Int63n(10000)}}})
}
lbls := series[i : i+stepSize]
series := make([]record.RefSeries, 0, len(series))
for j, l := range lbls {
series = append(series, record.RefSeries{
Ref: chunks.HeadSeriesRef(i + j),
Labels: l,
})
}
require.NoError(t, w.LogSeries(series))
require.NoError(t, w.LogSamples(samples))
require.NoError(t, w.LogDeletes(stones))
if len(lbls) > 0 {
recordedSeries = append(recordedSeries, series)
}
if len(samples) > 0 {
recordedSamples = append(recordedSamples, samples)
totalSamples += len(samples)
}
if len(stones) > 0 {
recordedDeletes = append(recordedDeletes, stones)
}
}
require.NoError(t, w.Close())
}
}
func TestWALRestoreCorrupted_invalidSegment(t *testing.T) {
dir := t.TempDir()
wal, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(wal)
_, err = wal.createSegmentFile(filepath.Join(dir, "000000"))
require.NoError(t, err)
f, err := wal.createSegmentFile(filepath.Join(dir, "000001"))
require.NoError(t, err)
f2, err := wal.createSegmentFile(filepath.Join(dir, "000002"))
require.NoError(t, err)
require.NoError(t, f2.Close())
// Make header of second segment invalid.
_, err = f.WriteAt([]byte{1, 2, 3, 4}, 0)
require.NoError(t, err)
require.NoError(t, f.Close())
require.NoError(t, wal.Close())
wal, err = OpenSegmentWAL(dir, log.NewLogfmtLogger(os.Stderr), 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(wal)
files, err := os.ReadDir(dir)
require.NoError(t, err)
fns := []string{}
for _, f := range files {
fns = append(fns, f.Name())
}
require.Equal(t, []string{"000000"}, fns)
}
// Test reading from a WAL that has been corrupted through various means.
func TestWALRestoreCorrupted(t *testing.T) {
cases := []struct {
name string
f func(*testing.T, *SegmentWAL)
}{
{
name: "truncate_checksum",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
require.NoError(t, f.Truncate(off-1))
},
},
{
name: "truncate_body",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
require.NoError(t, f.Truncate(off-8))
},
},
{
name: "body_content",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
// Write junk before checksum starts.
_, err = f.WriteAt([]byte{1, 2, 3, 4}, off-8)
require.NoError(t, err)
},
},
{
name: "checksum",
f: func(t *testing.T, w *SegmentWAL) {
f, err := os.OpenFile(w.files[0].Name(), os.O_WRONLY, 0o666)
require.NoError(t, err)
defer f.Close()
off, err := f.Seek(0, io.SeekEnd)
require.NoError(t, err)
// Write junk into checksum
_, err = f.WriteAt([]byte{1, 2, 3, 4}, off-4)
require.NoError(t, err)
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
// Generate testing data. It does not make semantic sense but
// is sufficient for the purpose of this test.
dir := t.TempDir()
w, err := OpenSegmentWAL(dir, nil, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w)
require.NoError(t, w.LogSamples([]record.RefSample{{T: 1, V: 2}}))
require.NoError(t, w.LogSamples([]record.RefSample{{T: 2, V: 3}}))
require.NoError(t, w.cut())
// Sleep 2 seconds to avoid an error where cut and the test "cases" function may write or
// truncate the file out of order, as "cases" are not synchronized with cut.
// Hopefully cut will complete by 2 seconds.
time.Sleep(2 * time.Second)
require.NoError(t, w.LogSamples([]record.RefSample{{T: 3, V: 4}}))
require.NoError(t, w.LogSamples([]record.RefSample{{T: 5, V: 6}}))
require.NoError(t, w.Close())
// cut() truncates and fsyncs the first segment async. If it happens after
// the corruption we apply below, the corruption will be overwritten again.
// Fire and forget a sync to avoid flakiness.
w.files[0].Sync()
// Corrupt the second entry in the first file.
// After re-opening we must be able to read the first entry
// and the rest, including the second file, must be truncated for clean further
// writes.
c.f(t, w)
logger := log.NewLogfmtLogger(os.Stderr)
w2, err := OpenSegmentWAL(dir, logger, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w2)
r := w2.Reader()
serf := func(l []record.RefSeries) {
require.Empty(t, l)
}
// Weird hack to check order of reads.
i := 0
samplef := func(s []record.RefSample) {
if i == 0 {
require.Equal(t, []record.RefSample{{T: 1, V: 2}}, s)
i++
} else {
require.Equal(t, []record.RefSample{{T: 99, V: 100}}, s)
}
}
require.NoError(t, r.Read(serf, samplef, nil))
require.NoError(t, w2.LogSamples([]record.RefSample{{T: 99, V: 100}}))
require.NoError(t, w2.Close())
// We should see the first valid entry and the new one, everything after
// is truncated.
w3, err := OpenSegmentWAL(dir, logger, 0, nil)
require.NoError(t, err)
defer func(wal *SegmentWAL) { require.NoError(t, wal.Close()) }(w3)
r = w3.Reader()
i = 0
require.NoError(t, r.Read(serf, samplef, nil))
})
}
}
func TestMigrateWAL_Empty(t *testing.T) {
// The migration procedure must properly deal with a zero-length segment,
// which is valid in the new format.
dir := t.TempDir()
wdir := path.Join(dir, "wal")
// Initialize empty WAL.
w, err := wlog.New(nil, nil, wdir, wlog.CompressionNone)
require.NoError(t, err)
require.NoError(t, w.Close())
require.NoError(t, MigrateWAL(nil, wdir))
}
func TestMigrateWAL_Fuzz(t *testing.T) {
dir := t.TempDir()
wdir := path.Join(dir, "wal")
// Should pass if no WAL exists yet.
require.NoError(t, MigrateWAL(nil, wdir))
oldWAL, err := OpenSegmentWAL(wdir, nil, time.Minute, nil)
require.NoError(t, err)
// Write some data.
require.NoError(t, oldWAL.LogSeries([]record.RefSeries{
{Ref: 100, Labels: labels.FromStrings("abc", "def", "123", "456")},
{Ref: 1, Labels: labels.FromStrings("abc", "def2", "1234", "4567")},
}))
require.NoError(t, oldWAL.LogSamples([]record.RefSample{
{Ref: 1, T: 100, V: 200},
{Ref: 2, T: 300, V: 400},
}))
require.NoError(t, oldWAL.LogSeries([]record.RefSeries{
{Ref: 200, Labels: labels.FromStrings("xyz", "def", "foo", "bar")},
}))
require.NoError(t, oldWAL.LogSamples([]record.RefSample{
{Ref: 3, T: 100, V: 200},
{Ref: 4, T: 300, V: 400},
}))
require.NoError(t, oldWAL.LogDeletes([]tombstones.Stone{
{Ref: 1, Intervals: []tombstones.Interval{{Mint: 100, Maxt: 200}}},
}))
require.NoError(t, oldWAL.Close())
// Perform migration.
require.NoError(t, MigrateWAL(nil, wdir))
w, err := wlog.New(nil, nil, wdir, wlog.CompressionNone)
require.NoError(t, err)
// We can properly write some new data after migration.
var enc record.Encoder
require.NoError(t, w.Log(enc.Samples([]record.RefSample{
{Ref: 500, T: 1, V: 1},
}, nil)))
require.NoError(t, w.Close())
// Read back all data.
sr, err := wlog.NewSegmentsReader(wdir)
require.NoError(t, err)
r := wlog.NewReader(sr)
var res []interface{}
dec := record.NewDecoder(labels.NewSymbolTable())
for r.Next() {
rec := r.Record()
switch dec.Type(rec) {
case record.Series:
s, err := dec.Series(rec, nil)
require.NoError(t, err)
res = append(res, s)
case record.Samples:
s, err := dec.Samples(rec, nil)
require.NoError(t, err)
res = append(res, s)
case record.Tombstones:
s, err := dec.Tombstones(rec, nil)
require.NoError(t, err)
res = append(res, s)
default:
require.Fail(t, "unknown record type %d", dec.Type(rec))
}
}
require.NoError(t, r.Err())
testutil.RequireEqual(t, []interface{}{
[]record.RefSeries{
{Ref: 100, Labels: labels.FromStrings("abc", "def", "123", "456")},
{Ref: 1, Labels: labels.FromStrings("abc", "def2", "1234", "4567")},
},
[]record.RefSample{{Ref: 1, T: 100, V: 200}, {Ref: 2, T: 300, V: 400}},
[]record.RefSeries{
{Ref: 200, Labels: labels.FromStrings("xyz", "def", "foo", "bar")},
},
[]record.RefSample{{Ref: 3, T: 100, V: 200}, {Ref: 4, T: 300, V: 400}},
[]tombstones.Stone{{Ref: 1, Intervals: []tombstones.Interval{{Mint: 100, Maxt: 200}}}},
[]record.RefSample{{Ref: 500, T: 1, V: 1}},
}, res)
// Migrating an already migrated WAL shouldn't do anything.
require.NoError(t, MigrateWAL(nil, wdir))
}

View file

@ -149,22 +149,23 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
r := NewReader(sgmReader) r := NewReader(sgmReader)
var ( var (
series []record.RefSeries series []record.RefSeries
samples []record.RefSample samples []record.RefSample
histogramSamples []record.RefHistogramSample histogramSamples []record.RefHistogramSample
tstones []tombstones.Stone floatHistogramSamples []record.RefFloatHistogramSample
exemplars []record.RefExemplar tstones []tombstones.Stone
metadata []record.RefMetadata exemplars []record.RefExemplar
st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function. metadata []record.RefMetadata
dec = record.NewDecoder(st) st = labels.NewSymbolTable() // Needed for decoding; labels do not outlive this function.
enc record.Encoder dec = record.NewDecoder(st)
buf []byte enc record.Encoder
recs [][]byte buf []byte
recs [][]byte
latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata) latestMetadataMap = make(map[chunks.HeadSeriesRef]record.RefMetadata)
) )
for r.Next() { for r.Next() {
series, samples, histogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0] series, samples, histogramSamples, floatHistogramSamples, tstones, exemplars, metadata = series[:0], samples[:0], histogramSamples[:0], floatHistogramSamples[:0], tstones[:0], exemplars[:0], metadata[:0]
// We don't reset the buffer since we batch up multiple records // We don't reset the buffer since we batch up multiple records
// before writing them to the checkpoint. // before writing them to the checkpoint.
@ -224,8 +225,26 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head
			if len(repl) > 0 {
				buf = enc.HistogramSamples(repl, buf)
			}
-			stats.TotalSamples += len(samples)
-			stats.DroppedSamples += len(samples) - len(repl)
+			stats.TotalSamples += len(histogramSamples)
+			stats.DroppedSamples += len(histogramSamples) - len(repl)
+		case record.FloatHistogramSamples:
+			floatHistogramSamples, err = dec.FloatHistogramSamples(rec, floatHistogramSamples)
+			if err != nil {
+				return nil, fmt.Errorf("decode float histogram samples: %w", err)
+			}
+			// Drop irrelevant floatHistogramSamples in place.
+			repl := floatHistogramSamples[:0]
+			for _, fh := range floatHistogramSamples {
+				if fh.T >= mint {
+					repl = append(repl, fh)
+				}
+			}
+			if len(repl) > 0 {
+				buf = enc.FloatHistogramSamples(repl, buf)
+			}
+			stats.TotalSamples += len(floatHistogramSamples)
+			stats.DroppedSamples += len(floatHistogramSamples) - len(repl)
		case record.Tombstones:
			tstones, err = dec.Tombstones(rec, tstones)
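The new record.FloatHistogramSamples case follows the same in-place filtering idiom as the existing sample types: repl := floatHistogramSamples[:0] reuses the backing array of the decoded slice, so dropping samples older than mint costs no extra allocation. A minimal, self-contained sketch of that idiom follows; the sample type and filterInPlace helper are illustrative names, not part of this change.

package main

import "fmt"

type sample struct {
	T int64
	V float64
}

// filterInPlace keeps only the elements that satisfy keep, reusing in's
// backing array instead of allocating a new slice — the same trick the
// checkpoint code uses to drop samples older than mint.
func filterInPlace(in []sample, keep func(sample) bool) []sample {
	repl := in[:0]
	for _, s := range in {
		if keep(s) {
			repl = append(repl, s)
		}
	}
	return repl
}

func main() {
	mint := int64(100)
	in := []sample{{T: 50, V: 1}, {T: 150, V: 2}, {T: 200, V: 3}}
	out := filterInPlace(in, func(s sample) bool { return s.T >= mint })
	fmt.Println(out) // [{150 2} {200 3}]
}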

View file

@ -125,6 +125,20 @@ func TestCheckpoint(t *testing.T) {
			PositiveBuckets: []int64{int64(i + 1), 1, -1, 0},
		}
	}
+	makeFloatHistogram := func(i int) *histogram.FloatHistogram {
+		return &histogram.FloatHistogram{
+			Count:         5 + float64(i*4),
+			ZeroCount:     2 + float64(i),
+			ZeroThreshold: 0.001,
+			Sum:           18.4 * float64(i+1),
+			Schema:        1,
+			PositiveSpans: []histogram.Span{
+				{Offset: 0, Length: 2},
+				{Offset: 1, Length: 2},
+			},
+			PositiveBuckets: []float64{float64(i + 1), 1, -1, 0},
+		}
+	}

	for _, compress := range []CompressionType{CompressionNone, CompressionSnappy, CompressionZstd} {
		t.Run(fmt.Sprintf("compress=%s", compress), func(t *testing.T) {
@ -154,7 +168,7 @@ func TestCheckpoint(t *testing.T) {
			w, err = NewSize(nil, nil, dir, 64*1024, compress)
			require.NoError(t, err)
-			samplesInWAL, histogramsInWAL := 0, 0
+			samplesInWAL, histogramsInWAL, floatHistogramsInWAL := 0, 0, 0
			var last int64
			for i := 0; ; i++ {
				_, n, err := Segments(w.Dir())
@ -200,6 +214,15 @@ func TestCheckpoint(t *testing.T) {
				}, nil)
				require.NoError(t, w.Log(b))
				histogramsInWAL += 4
+				fh := makeFloatHistogram(i)
+				b = enc.FloatHistogramSamples([]record.RefFloatHistogramSample{
+					{Ref: 0, T: last, FH: fh},
+					{Ref: 1, T: last + 10000, FH: fh},
+					{Ref: 2, T: last + 20000, FH: fh},
+					{Ref: 3, T: last + 30000, FH: fh},
+				}, nil)
+				require.NoError(t, w.Log(b))
+				floatHistogramsInWAL += 4

				b = enc.Exemplars([]record.RefExemplar{
					{Ref: 1, T: last, V: float64(i), Labels: labels.FromStrings("trace_id", fmt.Sprintf("trace-%d", i))},
@ -220,12 +243,14 @@ func TestCheckpoint(t *testing.T) {
			}
			require.NoError(t, w.Close())

-			_, err = Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
+			stats, err := Checkpoint(log.NewNopLogger(), w, 100, 106, func(x chunks.HeadSeriesRef) bool {
				return x%2 == 0
			}, last/2)
			require.NoError(t, err)
			require.NoError(t, w.Truncate(107))
			require.NoError(t, DeleteCheckpoints(w.Dir(), 106))
+			require.Equal(t, histogramsInWAL+floatHistogramsInWAL+samplesInWAL, stats.TotalSamples)
+			require.Greater(t, stats.DroppedSamples, 0)

			// Only the new checkpoint should be left.
			files, err := os.ReadDir(dir)
@ -242,7 +267,7 @@ func TestCheckpoint(t *testing.T) {
			var metadata []record.RefMetadata
			r := NewReader(sr)
-			samplesInCheckpoint, histogramsInCheckpoint := 0, 0
+			samplesInCheckpoint, histogramsInCheckpoint, floatHistogramsInCheckpoint := 0, 0, 0
			for r.Next() {
				rec := r.Record()
@ -264,6 +289,13 @@ func TestCheckpoint(t *testing.T) {
						require.GreaterOrEqual(t, h.T, last/2, "histogram with wrong timestamp")
					}
					histogramsInCheckpoint += len(histograms)
+				case record.FloatHistogramSamples:
+					floatHistograms, err := dec.FloatHistogramSamples(rec, nil)
+					require.NoError(t, err)
+					for _, h := range floatHistograms {
+						require.GreaterOrEqual(t, h.T, last/2, "float histogram with wrong timestamp")
+					}
+					floatHistogramsInCheckpoint += len(floatHistograms)
				case record.Exemplars:
					exemplars, err := dec.Exemplars(rec, nil)
					require.NoError(t, err)
@ -281,6 +313,8 @@ func TestCheckpoint(t *testing.T) {
			require.Less(t, float64(samplesInCheckpoint)/float64(samplesInWAL), 0.8)
			require.Greater(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.5)
			require.Less(t, float64(histogramsInCheckpoint)/float64(histogramsInWAL), 0.8)
+			require.Greater(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.5)
+			require.Less(t, float64(floatHistogramsInCheckpoint)/float64(floatHistogramsInWAL), 0.8)

			expectedRefSeries := []record.RefSeries{
				{Ref: 0, Labels: labels.FromStrings("a", "b", "c", "0")},
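The ratio assertions above check that between 50% and 80% of each sample type survives the checkpoint, which is what you expect from a keep function that retains even series refs plus a timestamp cutoff of last/2. As a rough sketch of how such a checkpoint can be replayed outside a test, the helper below walks every record in a directory and counts float histogram samples at or after a cutoff. It assumes the exported wlog.NewSegmentsReader and wlog.NewReader entry points together with the record decoder shown in this diff; the function name and the example path are illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/tsdb/record"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

// countFloatHistograms replays every record in a checkpoint (or WAL segment)
// directory and counts float histogram samples at or after mint, mirroring
// the counting loop in the test above.
func countFloatHistograms(dir string, mint int64) (int, error) {
	sr, err := wlog.NewSegmentsReader(dir)
	if err != nil {
		return 0, err
	}
	defer sr.Close()

	dec := record.NewDecoder(labels.NewSymbolTable())
	r := wlog.NewReader(sr)
	n := 0
	for r.Next() {
		rec := r.Record()
		if dec.Type(rec) != record.FloatHistogramSamples {
			continue
		}
		fhs, err := dec.FloatHistogramSamples(rec, nil)
		if err != nil {
			return 0, err
		}
		for _, fh := range fhs {
			if fh.T >= mint {
				n++
			}
		}
	}
	return n, r.Err()
}

func main() {
	// The checkpoint directory name here is only an example.
	n, err := countFloatHistograms("data/wal/checkpoint.00000106", 0)
	if err != nil {
		fmt.Println("replay failed:", err)
		return
	}
	fmt.Println("float histogram samples:", n)
}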

View file

@ -14,6 +14,7 @@
package teststorage

import (
+	"fmt"
	"os"
	"time"
@ -30,8 +31,18 @@ import (
// New returns a new TestStorage for testing purposes
// that removes all associated files on closing.
func New(t testutil.T) *TestStorage {
+	stor, err := NewWithError()
+	require.NoError(t, err)
+	return stor
+}
+
+// NewWithError returns a new TestStorage for user facing tests, which reports
+// errors directly.
+func NewWithError() (*TestStorage, error) {
	dir, err := os.MkdirTemp("", "test_storage")
-	require.NoError(t, err, "unexpected error while opening test directory")
+	if err != nil {
+		return nil, fmt.Errorf("opening test directory: %w", err)
+	}

	// Tests just load data for a series sequentially. Thus we
	// need a long appendable window.
@ -41,13 +52,17 @@ func New(t testutil.T) *TestStorage {
	opts.RetentionDuration = 0
	opts.EnableNativeHistograms = true
	db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats())
-	require.NoError(t, err, "unexpected error while opening test storage")
+	if err != nil {
+		return nil, fmt.Errorf("opening test storage: %w", err)
+	}

	reg := prometheus.NewRegistry()
	eMetrics := tsdb.NewExemplarMetrics(reg)
	es, err := tsdb.NewCircularExemplarStorage(10, eMetrics)
-	require.NoError(t, err, "unexpected error while opening test exemplar storage")
-	return &TestStorage{DB: db, exemplarStorage: es, dir: dir}
+	if err != nil {
+		return nil, fmt.Errorf("opening test exemplar storage: %w", err)
+	}
+	return &TestStorage{DB: db, exemplarStorage: es, dir: dir}, nil
}

type TestStorage struct {
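NewWithError gives callers that have no *testing.T (for example user-facing tooling built on this package) a way to construct the storage and handle the error themselves, while New keeps the old require-based behaviour for tests. A hedged usage sketch, assuming the existing Close method on TestStorage:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/util/teststorage"
)

func main() {
	stor, err := teststorage.NewWithError()
	if err != nil {
		log.Fatalf("setting up storage: %v", err)
	}
	// Close is part of the existing TestStorage API and removes the temp dir.
	defer stor.Close()

	fmt.Println("storage ready; append samples and run queries against stor as usual")
}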

View file

@ -177,13 +177,6 @@ type TSDBAdminStats interface {
	WALReplayStatus() (tsdb.WALReplayStatus, error)
}

-// QueryEngine defines the interface for the *promql.Engine, so it can be replaced, wrapped or mocked.
-type QueryEngine interface {
-	SetQueryLogger(l promql.QueryLogger)
-	NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error)
-	NewRangeQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, start, end time.Time, interval time.Duration) (promql.Query, error)
-}
-
type QueryOpts interface {
	EnablePerStepStats() bool
	LookbackDelta() time.Duration
@ -193,7 +186,7 @@ type QueryOpts interface {
// them using the provided storage and query engine.
type API struct {
	Queryable         storage.SampleAndChunkQueryable
-	QueryEngine       QueryEngine
+	QueryEngine       promql.QueryEngine
	ExemplarQueryable storage.ExemplarQueryable

	scrapePoolsRetriever func(context.Context) ScrapePoolsRetriever
@ -226,7 +219,7 @@ type API struct {
// NewAPI returns an initialized API type.
func NewAPI(
-	qe QueryEngine,
+	qe promql.QueryEngine,
	q storage.SampleAndChunkQueryable,
	ap storage.Appendable,
	eq storage.ExemplarQueryable,
@ -889,6 +882,9 @@ func (api *API) series(r *http.Request) (result apiFuncResult) {
	warnings := set.Warnings()
	for set.Next() {
+		if err := ctx.Err(); err != nil {
+			return apiFuncResult{nil, returnAPIError(err), warnings, closer}
+		}
		metrics = append(metrics, set.At().Labels())

		if len(metrics) >= limit {
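The series handler now checks ctx.Err() once per iteration, so a client that disconnects mid-request stops the streaming of matched series instead of letting it run to completion, and the error is mapped through returnAPIError (the new errorCanceled test case below covers the context.Canceled mapping). A standalone sketch of the pattern, with an illustrative processAll helper:

package main

import (
	"context"
	"errors"
	"fmt"
)

// processAll checks the caller's context once per iteration and stops early
// when the request has been cancelled — the same pattern the series handler
// uses above.
func processAll(ctx context.Context, items []string) error {
	for _, it := range items {
		if err := ctx.Err(); err != nil {
			return err // context.Canceled or context.DeadlineExceeded
		}
		_ = it // do the per-item work here
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the client going away immediately
	err := processAll(ctx, []string{"a", "b", "c"})
	fmt.Println(errors.Is(err, context.Canceled)) // true
}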

View file

@ -3568,6 +3568,9 @@ func TestReturnAPIError(t *testing.T) {
		}, {
			err:      errors.New("exec error"),
			expected: errorExec,
+		}, {
+			err:      context.Canceled,
+			expected: errorCanceled,
		},
} }
@ -3881,8 +3884,6 @@ type fakeEngine struct {
	query fakeQuery
}

-func (e *fakeEngine) SetQueryLogger(promql.QueryLogger) {}
-
func (e *fakeEngine) NewInstantQuery(ctx context.Context, q storage.Queryable, opts promql.QueryOpts, qs string, ts time.Time) (promql.Query, error) {
	return &e.query, nil
}
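With the package-local QueryEngine interface removed, a test double only needs to satisfy promql.QueryEngine, which is why fakeEngine no longer carries SetQueryLogger. A hedged sketch of such a stub, using the two constructor signatures shown in this diff; the stubEngine name and package layout are illustrative:

package enginestub

import (
	"context"
	"time"

	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/storage"
)

// stubEngine returns a pre-built query for every request; it is intended to
// satisfy promql.QueryEngine as now consumed by NewAPI.
type stubEngine struct {
	q promql.Query
}

func (e *stubEngine) NewInstantQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _ time.Time) (promql.Query, error) {
	return e.q, nil
}

func (e *stubEngine) NewRangeQuery(_ context.Context, _ storage.Queryable, _ promql.QueryOpts, _ string, _, _ time.Time, _ time.Duration) (promql.Query, error) {
	return e.q, nil
}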

View file

@ -48,29 +48,29 @@ var scenarios = map[string]struct {
}{
	"empty": {
		params: "",
-		code:   200,
+		code:   http.StatusOK,
		body:   ``,
	},
	"match nothing": {
		params: "match[]=does_not_match_anything",
-		code:   200,
+		code:   http.StatusOK,
		body:   ``,
	},
	"invalid params from the beginning": {
		params: "match[]=-not-a-valid-metric-name",
-		code:   400,
+		code:   http.StatusBadRequest,
		body: `1:1: parse error: unexpected <op:->
`,
	},
	"invalid params somewhere in the middle": {
		params: "match[]=not-a-valid-metric-name",
-		code:   400,
+		code:   http.StatusBadRequest,
		body: `1:4: parse error: unexpected <op:->
`,
	},
	"test_metric1": {
		params: "match[]=test_metric1",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
@ -78,33 +78,33 @@ test_metric1{foo="boo",instance="i"} 1 6000000
	},
	"test_metric2": {
		params: "match[]=test_metric2",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric2 untyped
test_metric2{foo="boo",instance="i"} 1 6000000
`,
	},
	"test_metric_without_labels": {
		params: "match[]=test_metric_without_labels",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric_without_labels untyped
test_metric_without_labels{instance=""} 1001 6000000
`,
	},
	"test_stale_metric": {
		params: "match[]=test_metric_stale",
-		code:   200,
+		code:   http.StatusOK,
		body:   ``,
	},
	"test_old_metric": {
		params: "match[]=test_metric_old",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric_old untyped
test_metric_old{instance=""} 981 5880000
`,
	},
	"{foo='boo'}": {
		params: "match[]={foo='boo'}",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="boo",instance="i"} 1 6000000
# TYPE test_metric2 untyped
@ -113,7 +113,7 @@ test_metric2{foo="boo",instance="i"} 1 6000000
	},
	"two matchers": {
		params: "match[]=test_metric1&match[]=test_metric2",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
@ -123,7 +123,7 @@ test_metric2{foo="boo",instance="i"} 1 6000000
	},
	"two matchers with overlap": {
		params: "match[]={__name__=~'test_metric1'}&match[]={foo='bar'}",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
@ -131,7 +131,7 @@ test_metric1{foo="boo",instance="i"} 1 6000000
	},
	"everything": {
		params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'.
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
@ -145,7 +145,7 @@ test_metric_without_labels{instance=""} 1001 6000000
	},
	"empty label value matches everything that doesn't have that label": {
		params: "match[]={foo='',__name__=~'.%2b'}",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric_old untyped
test_metric_old{instance=""} 981 5880000
# TYPE test_metric_without_labels untyped
@ -154,7 +154,7 @@ test_metric_without_labels{instance=""} 1001 6000000
	},
	"empty label value for a label that doesn't exist at all, matches everything": {
		params: "match[]={bar='',__name__=~'.%2b'}",
-		code:   200,
+		code:   http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
@ -169,7 +169,7 @@ test_metric_without_labels{instance=""} 1001 6000000
"external labels are added if not already present": { "external labels are added if not already present": {
params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'. params: "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'.
externalLabels: labels.FromStrings("foo", "baz", "zone", "ie"), externalLabels: labels.FromStrings("foo", "baz", "zone", "ie"),
code: 200, code: http.StatusOK,
body: `# TYPE test_metric1 untyped body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i",zone="ie"} 10000 6000000 test_metric1{foo="bar",instance="i",zone="ie"} 10000 6000000
test_metric1{foo="boo",instance="i",zone="ie"} 1 6000000 test_metric1{foo="boo",instance="i",zone="ie"} 1 6000000
@ -186,7 +186,7 @@ test_metric_without_labels{foo="baz",instance="",zone="ie"} 1001 6000000
		// know what it does anyway.
		params:         "match[]={__name__=~'.%2b'}", // '%2b' is an URL-encoded '+'.
		externalLabels: labels.FromStrings("instance", "baz"),
-		code:           200,
+		code:           http.StatusOK,
		body: `# TYPE test_metric1 untyped
test_metric1{foo="bar",instance="i"} 10000 6000000
test_metric1{foo="boo",instance="i"} 1 6000000
@ -390,7 +390,6 @@ func TestFederationWithNativeHistograms(t *testing.T) {
	require.Equal(t, http.StatusOK, res.Code)
	body, err := io.ReadAll(res.Body)
	require.NoError(t, err)
	p := textparse.NewProtobufParser(body, false, labels.NewSymbolTable())
	var actVec promql.Vector
	metricFamilies := 0