Merge branch 'main' into resolveconflicts-2.52.1

Arthur Silva Sens 2024-05-31 09:36:17 -03:00
commit d73537ccc0
187 changed files with 5168 additions and 3709 deletions

@@ -12,7 +12,7 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -13,7 +13,7 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
@@ -27,7 +27,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
@@ -43,7 +43,7 @@ jobs:
# The go version in this image should be N-1 wrt test_go.
image: quay.io/prometheus/golang-builder:1.21-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: make build
# Don't run NPM build; don't run race-detector.
- run: make test GO_ONLY=1 test-flags=""
@@ -57,7 +57,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
with:
@@ -74,7 +74,7 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: 1.22.x
@@ -91,7 +91,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: go install ./cmd/promtool/.
- run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
- run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -114,7 +114,7 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
@@ -137,32 +137,44 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
parallelism: 12
thread: ${{ matrix.thread }}
golangci:
name: golangci-lint
check_generated_parser:
name: Check generated parser
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
cache: false
go-version: 1.22.x
- name: Run goyacc and check for diff
run: make install-goyacc check-generated-parser
golangci:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
version: v1.56.2
version: v1.59.0
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@@ -175,7 +187,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_main
with:
@@ -189,7 +201,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_release
with:
@@ -204,7 +216,7 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- name: Install nodejs
uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2

@@ -24,7 +24,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Initialize CodeQL
uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12

@@ -17,7 +17,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@@ -37,7 +37,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name

@@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts

@@ -13,7 +13,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
- run: ./scripts/sync_repo_files.sh
env:
GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

@@ -21,7 +21,7 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # tag=v4.1.2
uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # tag=v4.1.4
with:
persist-credentials: false
@@ -37,7 +37,7 @@
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # tag=v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
with:
name: SARIF file
path: results.sarif

.gitpod.Dockerfile

@@ -1,15 +1,33 @@
FROM gitpod/workspace-full
# Set Node.js version as an environment variable.
ENV CUSTOM_NODE_VERSION=16
ENV CUSTOM_GO_VERSION=1.19
ENV GOPATH=$HOME/go-packages
ENV GOROOT=$HOME/go
ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH
# Install and use the specified Node.js version via nvm.
RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
# Ensure nvm uses the default Node.js version in all new shells.
RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
&& printf '%s\n' 'export GOPATH=/workspace/go' \
'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
# Remove any existing Go installation in $HOME path.
RUN rm -rf $HOME/go $HOME/go-packages
# Export go environment variables.
RUN echo "export GOPATH=/workspace/go" >> ~/.bashrc.d/300-go && \
echo "export GOBIN=\$GOPATH/bin" >> ~/.bashrc.d/300-go && \
echo "export GOROOT=${HOME}/go" >> ~/.bashrc.d/300-go && \
echo "export PATH=\$GOROOT/bin:\$GOBIN:\$PATH" >> ~/.bashrc
# Reload the environment variables to ensure go environment variables are
# available in subsequent commands.
RUN bash -c "source ~/.bashrc && source ~/.bashrc.d/300-go"
# Fetch the Go version dynamically from the Prometheus go.mod file and install Go in $HOME path.
RUN export CUSTOM_GO_VERSION=$(curl -sSL "https://raw.githubusercontent.com/prometheus/prometheus/main/go.mod" | awk '/^go/{print $2".0"}') && \
curl -fsSL "https://dl.google.com/go/go${CUSTOM_GO_VERSION}.linux-amd64.tar.gz" | \
tar -xz -C $HOME
# Fetch the goyacc parser version dynamically from the Prometheus Makefile
# and install it globally in $GOBIN path.
RUN GOYACC_VERSION=$(curl -fsSL "https://raw.githubusercontent.com/prometheus/prometheus/main/Makefile" | awk -F'=' '/GOYACC_VERSION \?=/{gsub(/ /, "", $2); print $2}') && \
go install "golang.org/x/tools/cmd/goyacc@${GOYACC_VERSION}"

@@ -21,6 +21,7 @@ linters:
- goimports
- misspell
- nolintlint
- perfsprint
- predeclared
- revive
- testifylint
@@ -44,7 +45,9 @@ issues:
- linters:
- godot
source: "^// ==="
- linters:
- perfsprint
text: "fmt.Sprintf can be replaced with string concatenation"
linters-settings:
depguard:
rules:
@@ -85,6 +88,9 @@ linters-settings:
local-prefixes: github.com/prometheus/prometheus
gofumpt:
extra-rules: true
perfsprint:
# Optimizes `fmt.Errorf`.
errorf: false
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly set in configuration all required rules.
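A note on the `perfsprint` settings above: the linter flags `fmt.Sprintf` calls that have cheaper `strconv` equivalents, and much of the Go churn in this merge is exactly that mechanical rewrite. A minimal sketch of the pattern (values and names are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	port := 9090              // int
	id := int64(42)           // 64-bit values need FormatInt
	var srvPort uint16 = 9100 // unsigned types are converted to int first
	ok := true

	_ = fmt.Sprintf("%d", port) // flagged; use strconv.Itoa(port)
	_ = strconv.Itoa(port)
	_ = strconv.FormatInt(id, 10)
	_ = strconv.Itoa(int(srvPort))
	_ = strconv.FormatBool(ok) // instead of fmt.Sprintf("%t", ok)

	// Typical call site from this diff: building an address label.
	fmt.Println(net.JoinHostPort("10.0.0.1", strconv.Itoa(port)))
}
```

The `errorf: false` setting leaves `fmt.Errorf` alone, and the `issues` exclusion above keeps the linter from also demanding plain string concatenation in place of `fmt.Sprintf`.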

@@ -2,6 +2,14 @@
## unreleased
* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within the alert rule) when restoring alerts. #13980
* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061
* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991
* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991
## 2.52.1 / 2024-05-29
* [BUGFIX] Linode SD: Fix partial fetch when discovery would return more than 500 elements. #14141

@@ -42,7 +42,12 @@ go build ./cmd/prometheus/
make test # Make sure all the tests pass before you commit and push :)
```
We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
To run a collection of Go linters through [`golangci-lint`](https://github.com/golangci/golangci-lint), do:
```bash
make lint
```
If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. See [this section of the golangci-lint documentation](https://golangci-lint.run/usage/false-positives/#nolint-directive) for more information.
All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions).
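For illustration, a small example of the `//nolint` directive format described above (the linter name and constant are arbitrary):

```go
package example

// The directive suppresses only the named linter on this one line; prefer
// naming linters over a bare //nolint, and note why the suppression is okay.
var pi = 3.14159 //nolint:gomnd // well-known constant, not a magic number
```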

@@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
GOLANGCI_LINT_OPTS ?= --timeout 4m
GOYACC_VERSION ?= v0.6.0
include Makefile.common
@@ -78,24 +79,42 @@ assets-tarball: assets
@echo '>> packaging assets'
scripts/package_assets.sh
# We only want to generate the parser when there are changes to the grammar.
.PHONY: parser
parser:
@echo ">> running goyacc to generate the .go file."
ifeq (, $(shell command -v goyacc 2> /dev/null))
@echo "goyacc not installed so skipping"
@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
@echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\""
else
goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
$(MAKE) promql/parser/generated_parser.y.go
endif
promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
@echo ">> running goyacc to generate the .go file."
@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
.PHONY: clean-parser
clean-parser:
@echo ">> cleaning generated parser"
@rm -f promql/parser/generated_parser.y.go
.PHONY: check-generated-parser
check-generated-parser: clean-parser promql/parser/generated_parser.y.go
@echo ">> checking generated parser"
@git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false)
.PHONY: install-goyacc
install-goyacc:
@echo ">> installing goyacc $(GOYACC_VERSION)"
@go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)
.PHONY: test
# If we only want to test go code we have to change the test target
# which is called by all.
ifeq ($(GO_ONLY),1)
test: common-test check-go-mod-version
else
test: common-test ui-build-module ui-test ui-lint check-go-mod-version
test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
endif
.PHONY: npm_licenses

@@ -55,13 +55,13 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),)
endif
endif
PROMU_VERSION ?= 0.15.0
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.56.2
GOLANGCI_LINT_VERSION ?= v1.59.0
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

@@ -42,6 +42,7 @@ import (
"github.com/mwitkow/go-conntrack"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
@@ -217,6 +218,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -251,6 +253,18 @@ func main() {
newFlagRetentionDuration model.Duration
)
// Unregister the default GoCollector, and reregister with our defaults.
if prometheus.Unregister(collectors.NewGoCollector()) {
prometheus.MustRegister(
collectors.NewGoCollector(
collectors.WithGoCollectorRuntimeMetrics(
collectors.MetricsGC,
collectors.MetricsScheduler,
),
),
)
}
cfg := flagConfig{
notifier: notifier.Options{
Registerer: prometheus.DefaultRegisterer,
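The collector swap in the hunk above can be exercised in isolation. This is a sketch against a throwaway registry (not Prometheus's actual startup code) using the same client_golang options to expose the GC and scheduler runtime/metrics groups:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()
	// Register a Go collector that also exports the runtime/metrics
	// GC and scheduler groups, as the hunk above does for the default
	// registry.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(
			collectors.MetricsGC,
			collectors.MetricsScheduler,
		),
	))
}
```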
@@ -417,7 +431,7 @@ func main() {
serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently.").
serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
Default("4").Int64Var(&cfg.maxConcurrentEvals)
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@@ -771,6 +785,9 @@ func main() {
ResendDelay: time.Duration(cfg.resendDelay),
MaxConcurrentEvals: cfg.maxConcurrentEvals,
ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
DefaultRuleQueryOffset: func() time.Duration {
return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
},
})
}

@@ -24,6 +24,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
@@ -189,7 +190,7 @@ func TestSendAlerts(t *testing.T) {
for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
require.NotEmpty(t, tc.in, "sender called with 0 alert")
require.Equal(t, tc.exp, alerts)

@@ -296,7 +296,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
require.Equal(t, 1, qc)
} else {
require.Greater(t, qc, 0, "no queries logged")
require.Positive(t, qc, "no queries logged")
}
p.validateLastQuery(t, ql)
@@ -366,7 +366,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
require.Equal(t, 1, qc)
} else {
require.Greater(t, qc, 0, "no queries logged")
require.Positive(t, qc, "no queries logged")
}
}

@@ -88,7 +88,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
blockDuration := getCompatibleBlockDuration(maxBlockDuration)
mint = blockDuration * (mint / blockDuration)
db, err := tsdb.OpenDBReadOnly(outputDir, nil)
db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
if err != nil {
return err
}

@@ -56,8 +56,8 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/util/documentcli"
)
@@ -235,12 +235,14 @@ func main() {
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.")
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
@@ -377,7 +379,7 @@ func main() {
case testRulesCmd.FullCommand():
os.Exit(RulesUnitTest(
promql.LazyLoaderOpts{
promqltest.LazyLoaderOpts{
EnableAtModifier: true,
EnableNegativeOffset: true,
},
@@ -396,9 +398,9 @@ func main() {
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
case tsdbDumpCmd.FullCommand():
os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
case tsdbDumpOpenMetricsCmd.FullCommand():
os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))

@@ -25,6 +25,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
@@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
} {
t.Run(c.file, func(t *testing.T) {
for _, lintFatal := range []bool{true, false} {
t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
args := []string{"-test.main", "check", "config", "testdata/" + c.file}
if lintFatal {
args = append(args, "--lint-fatal")

@@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
}
func listBlocks(path string, humanReadable bool) error {
db, err := tsdb.OpenDBReadOnly(path, nil)
db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil {
return err
}
@@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
}
func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
db, err := tsdb.OpenDBReadOnly(path, nil)
db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil {
return nil, nil, err
}
@@ -708,8 +708,8 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
type SeriesSetFormatter func(series storage.SeriesSet) error
func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
db, err := tsdb.OpenDBReadOnly(path, nil)
func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
if err != nil {
return err
}
@@ -856,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
}
avg := sum / len(datas)
fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
for bucket, count := range buckets {
percentage := 100.0 * count / total
fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))

@@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/tsdb"
)
@@ -64,6 +64,7 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin
err := dumpSamples(
context.Background(),
path,
t.TempDir(),
mint,
maxt,
match,
@@ -88,7 +89,7 @@ func normalizeNewLine(b []byte) []byte {
}
func TestTSDBDump(t *testing.T) {
storage := promql.LoadedStorage(t, `
storage := promqltest.LoadedStorage(t, `
load 1m
metric{foo="bar", baz="abc"} 1 2 3 4 5
heavy_metric{foo="bar"} 5 4 3 2 1
@@ -158,7 +159,7 @@ func TestTSDBDump(t *testing.T) {
}
func TestTSDBDumpOpenMetrics(t *testing.T) {
storage := promql.LoadedStorage(t, `
storage := promqltest.LoadedStorage(t, `
load 1m
my_counter{foo="bar", baz="abc"} 1 2 3 4 5
my_gauge{bar="foo", abc="baz"} 9 8 0 4 7

@@ -36,13 +36,14 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage"
)
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
failed := false
var run *regexp.Regexp
@@ -69,7 +70,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFla
return successExitCode
}
func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
fmt.Println("Unit Testing: ", filename)
b, err := os.ReadFile(filename)
@@ -175,9 +176,9 @@ type testGroup struct {
}
// test performs the unit tests.
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
// Setup testing suite.
suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil {
return []error{err}
}
@@ -413,7 +414,7 @@ Outer:
gotSamples = append(gotSamples, parsedSample{
Labels: s.Metric.Copy(),
Value: s.F,
Histogram: promql.HistogramTestExpression(s.H),
Histogram: promqltest.HistogramTestExpression(s.H),
})
}
@@ -443,7 +444,7 @@ Outer:
expSamples = append(expSamples, parsedSample{
Labels: lb,
Value: s.Value,
Histogram: promql.HistogramTestExpression(hist),
Histogram: promqltest.HistogramTestExpression(hist),
})
}
@@ -572,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
}
s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
for i, l := range la[1:] {
s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
}
s += "\n]"

@@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/promqltest"
)
func TestRulesUnitTest(t *testing.T) {
@@ -28,7 +28,7 @@ func TestRulesUnitTest(t *testing.T) {
tests := []struct {
name string
args args
queryOpts promql.LazyLoaderOpts
queryOpts promqltest.LazyLoaderOpts
want int
}{
{
@@ -92,7 +92,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/at-modifier-test.yml"},
},
queryOpts: promql.LazyLoaderOpts{
queryOpts: promqltest.LazyLoaderOpts{
EnableAtModifier: true,
},
want: 0,
@@ -109,7 +109,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/negative-offset-test.yml"},
},
queryOpts: promql.LazyLoaderOpts{
queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
},
want: 0,
@@ -119,7 +119,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/no-test-group-interval.yml"},
},
queryOpts: promql.LazyLoaderOpts{
queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
},
want: 0,
@@ -142,7 +142,7 @@ func TestRulesUnitTestRun(t *testing.T) {
tests := []struct {
name string
args args
queryOpts promql.LazyLoaderOpts
queryOpts promqltest.LazyLoaderOpts
want int
}{
{

@@ -145,6 +145,7 @@ var (
ScrapeInterval: model.Duration(1 * time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(1 * time.Minute),
RuleQueryOffset: model.Duration(0 * time.Minute),
// When native histogram feature flag is enabled, ScrapeProtocols default
// changes to DefaultNativeHistogramScrapeProtocols.
ScrapeProtocols: DefaultScrapeProtocols,
@@ -397,6 +398,8 @@ type GlobalConfig struct {
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
// How frequently to evaluate rules by default.
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
// Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
RuleQueryOffset model.Duration `yaml:"rule_query_offset"`
// File to which PromQL queries are logged.
QueryLogFile string `yaml:"query_log_file,omitempty"`
// The labels to add to any timeseries that this Prometheus instance scrapes.
@@ -556,6 +559,7 @@ func (c *GlobalConfig) isZero() bool {
c.ScrapeInterval == 0 &&
c.ScrapeTimeout == 0 &&
c.EvaluationInterval == 0 &&
c.RuleQueryOffset == 0 &&
c.QueryLogFile == "" &&
c.ScrapeProtocols == nil
}
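The comment above states the intent of `rule_query_offset`. As a sketch of those semantics only (not the rules manager's actual code, and with a hypothetical helper name), the effective offset shifts each evaluation's query timestamp into the past:

```go
package main

import (
	"fmt"
	"time"
)

// queryTime applies a rule group's query_offset, falling back to the
// global rule_query_offset when the group does not set its own.
func queryTime(eval time.Time, groupOffset, globalOffset time.Duration) time.Time {
	offset := globalOffset
	if groupOffset > 0 {
		offset = groupOffset
	}
	return eval.Add(-offset) // query the past so delayed samples have arrived
}

func main() {
	fmt.Println(queryTime(time.Now(), 0, 30*time.Second))
}
```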

@@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
@@ -279,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
if inst.PrivateDnsName != nil {
labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
}
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr)
if inst.Platform != nil {

@@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
@@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
lightsailLabelRegion: model.LabelValue(d.cfg.Region),
}
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr)
if inst.PublicIpAddress != nil {

@@ -20,6 +20,7 @@ import (
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
@@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
}
if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(address)
return labels, nil
}

@@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
// since the service may be registered remotely through a different node.
var addr string
if serviceNode.Service.Address != "" {
addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
} else {
addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
}
labels := model.LabelSet{

@@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
}
labels := model.LabelSet{
doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)),
doLabelName: model.LabelValue(droplet.Name),
doLabelImage: model.LabelValue(droplet.Image.Slug),
doLabelImageName: model.LabelValue(droplet.Image.Name),

@@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
@@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
tg := &targetgroup.Group{}
hostPort := func(a string, p int) model.LabelValue {
return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
}
for _, record := range response.Answer {
@@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
switch addr := record.(type) {
case *dns.SRV:
dnsSrvRecordTarget = model.LabelValue(addr.Target)
dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))
// Remove the final dot from rooted DNS names to make them look more usual.
addr.Target = strings.TrimRight(addr.Target, ".")

@@ -15,7 +15,6 @@ package hetzner
import (
"context"
"fmt"
"net"
"net/http"
"strconv"
@@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
for i, server := range servers {
labels := model.LabelSet{
hetznerLabelRole: model.LabelValue(HetznerRoleHcloud),
hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)),
hetznerLabelServerName: model.LabelValue(server.Name),
hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name),
hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name),
hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType),
hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
hetznerLabelHcloudMemoryGB: model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
}

@@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP),
hetznerLabelServerStatus: model.LabelValue(server.Server.Status),
hetznerLabelRobotProduct: model.LabelValue(server.Server.Product),
hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
}

@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
var cfg discovery.StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Source: strconv.Itoa(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},

@@ -334,7 +334,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
}
labels := model.LabelSet{
linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)),
linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)),
linodeLabelName: model.LabelValue(instance.Label),
linodeLabelImage: model.LabelValue(instance.Image),
linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
@@ -347,13 +347,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
linodeLabelType: model.LabelValue(instance.Type),
linodeLabelStatus: model.LabelValue(instance.Status),
linodeLabelGroup: model.LabelValue(instance.Group),
linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)),
linodeLabelHypervisor: model.LabelValue(instance.Hypervisor),
linodeLabelBackups: model.LabelValue(backupsStatus),
linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)),
linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)),
linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)),
linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)),
}
addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))

@@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig {
var cfg StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Source: strconv.Itoa(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},

@@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
host = task.Host
}
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
return net.JoinHostPort(host, strconv.Itoa(int(port)))
}
// Get a list of ports and a list of labels from a PortMapping.

@@ -19,16 +19,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)
func init() {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
}
// Metrics to be used with a discovery manager.
type Metrics struct {
FailedConfigs prometheus.Gauge

@@ -35,6 +35,11 @@ const (
workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue"
)
var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)
var (
// Metrics for client-go's HTTP requests.
clientGoRequestResultMetricVec = prometheus.NewCounterVec(
@@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector {
}
func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
for _, collector := range clientGoMetrics() {
err := registerer.Register(collector)
if err != nil {
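Taken together, the two hunks above move client-go metric registration out of package `init()` and into `RegisterK8sClientMetricsWithPrometheus`, so merely importing the package no longer registers collectors as a side effect. The general shape of that pattern, in a hypothetical package:

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// Hypothetical collector for illustration; creating it is inert.
var requestsTotal = prometheus.NewCounter(
	prometheus.CounterOpts{Name: "demo_requests_total"},
)

// Register wires up metrics only when a caller asks for them; nothing
// happens at import time.
func Register(reg prometheus.Registerer) error {
	return reg.Register(requestsTotal)
}
```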

@@ -15,7 +15,7 @@ package moby
import (
"context"
"fmt"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
@@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
labelPrefix + labelNetworkID: network.ID,
labelPrefix + labelNetworkName: network.Name,
labelPrefix + labelNetworkScope: network.Scope,
labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress),
}
for k, v := range network.Labels {
ln := strutil.SanitizeLabelName(k)

@@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
}
if n.ManagerStatus != nil {
labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
}

@@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)

@@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)

@@ -17,6 +17,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/go-kit/log"
"github.com/gophercloud/gophercloud"
@@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
tg := &targetgroup.Group{
Source: fmt.Sprintf("OS_" + h.region),
Source: "OS_" + h.region,
}
// OpenStack API reference
// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
@@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
for _, hypervisor := range hypervisorList {
labels := model.LabelSet{}
addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
labels[model.AddressLabel] = model.LabelValue(addr)
labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)

@@ -17,6 +17,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
}
pager := servers.List(client, opts)
tg := &targetgroup.Group{
Source: fmt.Sprintf("OS_" + i.region),
Source: "OS_" + i.region,
}
err = pager.EachPage(func(page pagination.Page) (bool, error) {
if ctx.Err() != nil {
@@ -194,7 +195,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
lbls[openstackLabelPublicIP] = model.LabelValue(val)
}
addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
lbls[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, lbls)

@@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
model.InstanceLabel: model.LabelValue(server.Name),
dedicatedServerLabelPrefix + "state": model.LabelValue(server.State),
dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange),
dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)),
dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)),
dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack),
dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)),
dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os),
dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel),
dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)),
dedicatedServerLabelPrefix + "server_id": model.LabelValue(strconv.FormatInt(server.ServerID, 10)),
dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse),
dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter),
dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name),

@@ -19,6 +19,7 @@ import (
"net/netip"
"net/url"
"path"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
model.InstanceLabel: model.LabelValue(server.Name),
vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer),
vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)),
vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)),
vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)),
vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)),
vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)),
vpsLabelPrefix + "version": model.LabelValue(server.Model.Version),
vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name),
vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)),
vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)),
vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)),
vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)),
vpsLabelPrefix + "zone": model.LabelValue(server.Zone),
vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName),
vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster),
vpsLabelPrefix + "state": model.LabelValue(server.State),
vpsLabelPrefix + "name": model.LabelValue(server.Name),
vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode),
vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)),
vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)),
vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType),
vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)),
vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)),
vpsLabelPrefix + "ipv4": model.LabelValue(ipv4),
vpsLabelPrefix + "ipv6": model.LabelValue(ipv6),
}

@@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
pdbLabelResource: model.LabelValue(resource.Resource),
pdbLabelType: model.LabelValue(resource.Type),
pdbLabelTitle: model.LabelValue(resource.Title),
pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)),
pdbLabelFile: model.LabelValue(resource.File),
pdbLabelEnvironment: model.LabelValue(resource.Environment),
}

@@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
labels[instanceTagsLabel] = model.LabelValue(tags)
}
addr := ""
if server.IPv6 != nil {
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
addr = server.IPv6.Address.String()
}
if server.PublicIP != nil {
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
addr = server.PublicIP.Address.String()
}
if server.PrivateIP != nil {
labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
addr = *server.PrivateIP
}
addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10))
if addr != "" {
addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
targets = append(targets, labels)
}
}
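The rewritten block above assembles the `__address__` label from whichever addresses exist, with later checks overriding earlier ones, so the effective preference is private IPv4, then public IPv4, then IPv6, and the target is skipped when none is present. Condensed into a hypothetical helper:

```go
package main

import "fmt"

// pickAddr mirrors the selection order above: each non-empty candidate
// overwrites the previous one, so private beats public, which beats IPv6.
// An empty result means "skip this target".
func pickAddr(ipv6, publicV4, privateV4 string) string {
	addr := ""
	if ipv6 != "" {
		addr = ipv6
	}
	if publicV4 != "" {
		addr = publicV4
	}
	if privateV4 != "" {
		addr = privateV4
	}
	return addr
}

func main() {
	fmt.Println(pickAddr("2001:db8::1", "192.0.2.10", "")) // public IPv4 wins here
}
```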

@@ -60,7 +60,7 @@ api_url: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 2)
require.Len(t, tg.Targets, 3)
for i, lbls := range []model.LabelSet{
{
@@ -110,6 +110,28 @@ api_url: %s
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "fr-par-1",
},
{
"__address__": "51.158.183.115:80",
"__meta_scaleway_instance_boot_type": "local",
"__meta_scaleway_instance_hostname": "routed-dualstack",
"__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a",
"__meta_scaleway_instance_image_arch": "x86_64",
"__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish",
"__meta_scaleway_instance_location_cluster_id": "19",
"__meta_scaleway_instance_location_hypervisor_id": "1201",
"__meta_scaleway_instance_location_node_id": "24",
"__meta_scaleway_instance_name": "routed-dualstack",
"__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_public_ipv4": "51.158.183.115",
"__meta_scaleway_instance_region": "nl-ams",
"__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092",
"__meta_scaleway_instance_security_group_name": "Default security group",
"__meta_scaleway_instance_status": "running",
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "nl-ams-1",
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])

@@ -216,6 +216,146 @@
"placement_group": null,
"private_nics": [],
"zone": "fr-par-1"
},
{
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack",
"arch": "x86_64",
"commercial_type": "DEV1-S",
"boot_type": "local",
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"hostname": "routed-dualstack",
"image": {
"id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"project": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"root_volume": {
"id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "unified",
"size": 10000000000
},
"extra_volumes": {},
"public": true,
"arch": "x86_64",
"creation_date": "2024-02-22T15:52:56.037007+00:00",
"modification_date": "2024-02-22T15:52:56.037007+00:00",
"default_bootscript": null,
"from_server": null,
"state": "available",
"tags": [],
"zone": "nl-ams-1"
},
"volumes": {
"0": {
"boot": false,
"id": "fe85c817-e67e-4e24-8f13-bde3e9f42620",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "l_ssd",
"export_uri": null,
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"server": {
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack"
},
"size": 20000000000,
"state": "available",
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:50:14.019739+00:00",
"tags": [],
"zone": "nl-ams-1"
}
},
"tags": [],
"state": "running",
"protected": false,
"state_detail": "booted",
"public_ip": {
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
"public_ips": [
{
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
{
"id": "f52a8c81-0875-4aee-b96e-eccfc6bec367",
"address": "2001:bc8:1640:1568:dc00:ff:fe21:91b",
"dynamic": false,
"gateway": "fe80::dc00:ff:fe21:91c",
"netmask": "64",
"family": "inet6",
"provisioning_mode": "slaac",
"tags": [],
"state": "attached",
"ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6"
}
],
"mac_address": "de:00:00:21:09:1b",
"routed_ip_enabled": true,
"ipv6": null,
"extra_networks": [],
"dynamic_ip_required": false,
"enable_ipv6": false,
"private_ip": null,
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:52:21.181670+00:00",
"bootscript": {
"id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe",
"public": true,
"title": "x86_64 mainline 4.4.182 rev1",
"architecture": "x86_64",
"organization": "11111111-1111-4111-8111-111111111111",
"project": "11111111-1111-4111-8111-111111111111",
"kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182",
"dtb": "",
"initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"zone": "nl-ams-1"
},
"security_group": {
"id": "984414da-9fc2-49c0-a925-fed6266fe092",
"name": "Default security group"
},
"location": {
"zone_id": "ams1",
"platform_id": "23",
"cluster_id": "19",
"hypervisor_id": "1201",
"node_id": "24"
},
"maintenances": [],
"allowed_actions": [
"poweroff",
"terminate",
"reboot",
"stop_in_place",
"backup"
],
"placement_group": null,
"private_nics": [],
"zone": "nl-ams-1"
}
]
}

@@ -20,6 +20,7 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels(
model.AddressLabel: model.LabelValue(addr),
uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),
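
The `fmt.Sprintf("%d", …)` to `strconv.Itoa(…)` swap above recurs throughout this PR. Both produce the same string for any int, but `strconv.Itoa` skips format-string parsing and the `interface{}` boxing that `fmt` performs. A minimal standalone benchmark sketch (a hypothetical file, not part of this diff):

```go
package main

import (
	"fmt"
	"strconv"
	"testing"
)

// Save as itoa_bench_test.go and run with `go test -bench .`.
// Both benchmarks format the loop counter; only the formatting path differs.
func BenchmarkSprintf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%d", i)
	}
}

func BenchmarkItoa(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = strconv.Itoa(i)
	}
}
```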

View file

@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
labels := model.LabelSet{}
labels[serversetPathLabel] = model.LabelValue(path)
labels[model.AddressLabel] = model.LabelValue(
net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port)))
net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port)))
labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host)
labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port))
labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port))
for name, endpoint := range member.AdditionalEndpoints {
cleanName := model.LabelName(strutil.SanitizeLabelName(name))
labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(
endpoint.Host)
labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
fmt.Sprintf("%d", endpoint.Port))
strconv.Itoa(endpoint.Port))
}
labels[serversetStatusLabel] = model.LabelValue(member.Status)
@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
labels := model.LabelSet{}
labels[nervePathLabel] = model.LabelValue(path)
labels[model.AddressLabel] = model.LabelValue(
net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port)))
net.JoinHostPort(member.Host, strconv.Itoa(member.Port)))
labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host)
labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port))
labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port))
labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name)
return labels, nil

View file

@ -48,7 +48,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--rules.alert.for-outage-tolerance</code> | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` |
| <code class="text-nowrap">--rules.alert.for-grace-period</code> | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` |
| <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. Use with server mode only. | `4` |
| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
| <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
| <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |

View file

@ -566,6 +566,7 @@ Dump samples from a TSDB.
| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@ -584,7 +585,7 @@ Dump samples from a TSDB.
##### `promtool tsdb dump-openmetrics`
[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.
[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.
@ -592,6 +593,7 @@ Dump samples from a TSDB.
| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |

View file

@ -71,6 +71,10 @@ global:
# How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ]
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
# Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there are anomalies with scraping.
[ rule_query_offset: <duration> | default = 0s ]
# The labels to add to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
@ -1349,7 +1353,7 @@ interface.
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_openstack_address_pool`: the pool of the private IP.
* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance.
* `__meta_openstack_instance_flavor`: the flavor ID of the OpenStack instance.
* `__meta_openstack_instance_id`: the OpenStack instance ID.
* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
* `__meta_openstack_instance_name`: the OpenStack instance name.
@ -1357,7 +1361,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_openstack_private_ip`: the private IP of the OpenStack instance.
* `__meta_openstack_project_id`: the project (tenant) owning this instance.
* `__meta_openstack_public_ip`: the public IP of the OpenStack instance.
* `__meta_openstack_tag_<tagkey>`: each tag value of the instance.
* `__meta_openstack_tag_<key>`: each metadata item of the instance, with any unsupported characters converted to an underscore.
* `__meta_openstack_user_id`: the user account owning the tenant.
See below for the configuration options for OpenStack discovery:
@ -1467,6 +1471,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud
* `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server
* `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server
* `__meta_ovhcloud_dedicated_server_name`: the name of the server
* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server
* `__meta_ovhcloud_dedicated_server_os`: the operating system of the server
* `__meta_ovhcloud_dedicated_server_rack`: the rack of the server
* `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server
@ -2952,9 +2957,10 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_scaleway_instance_type`: commercial type of the server
* `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))
This role uses the private IPv4 address by default. This can be
This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
configuration file](/documentation/examples/prometheus-scaleway.yml).
Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it.
#### Baremetal role
@ -3672,7 +3678,8 @@ queue_config:
[ min_shards: <int> | default = 1 ]
# Maximum number of samples per send.
[ max_samples_per_send: <int> | default = 2000]
# Maximum time a sample will wait in buffer.
# Maximum time a sample will wait for a send. The sample might wait less
# if the buffer is full. Further time might pass due to potential retries.
[ batch_send_deadline: <duration> | default = 5s ]
# Initial retry delay. Gets doubled for every retry.
[ min_backoff: <duration> | default = 30ms ]

View file

@ -86,6 +86,9 @@ name: <string>
# rule can produce. 0 is no limit.
[ limit: <int> | default = 0 ]
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
[ query_offset: <duration> | default = global.rule_query_offset ]
rules:
[ - <rule> ... ]
```
@ -148,6 +151,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.
# Rule query offset
This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals.
# Failed rule evaluations due to slow evaluation
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.

View file

@ -5,63 +5,7 @@ sort_rank: 7
# Remote Read API
This is not currently considered part of the stable API and is subject to change
even between non-major version releases of Prometheus.
## Format overview
The API response format is JSON. Every successful API request returns a `2xx`
status code.
Invalid requests that reach the API handlers return a JSON error object
and one of the following HTTP response codes:
- `400 Bad Request` when parameters are missing or incorrect.
- `422 Unprocessable Entity` when an expression can't be executed
([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)).
- `503 Service Unavailable` when queries time out or abort.
Other non-`2xx` codes may be returned for errors occurring before the API
endpoint is reached.
An array of warnings may be returned if there are errors that do
not inhibit the request execution. All of the data that was successfully
collected will be returned in the data field.
The JSON response envelope format is as follows:
```
{
"status": "success" | "error",
"data": <data>,
// Only set if status is "error". The data field may still hold
// additional data.
"errorType": "<string>",
"error": "<string>",
// Only if there were warnings while executing the request.
// There will still be data in the data field.
"warnings": ["<string>"]
}
```
Generic placeholders are defined as follows:
* `<rfc3339 | unix_timestamp>`: Input timestamps may be provided either in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp
in seconds, with optional decimal places for sub-second precision. Output
timestamps are always represented as Unix timestamps in seconds.
* `<series_selector>`: Prometheus [time series
selectors](basics.md#time-series-selectors) like `http_requests_total` or
`http_requests_total{method=~"(GET|POST)"}`; these need to be URL-encoded.
* `<duration>`: [Prometheus duration strings](basics.md#time_durations).
For example, `5m` refers to a duration of 5 minutes.
* `<bool>`: boolean values (strings `true` and `false`).
Note: Names of query parameters that may be repeated end with `[]`.
## Remote Read API
> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus.
This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression.
The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
@ -79,5 +23,3 @@ This returns a message that includes a list of raw samples.
These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf)
compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second.

View file

@ -84,8 +84,10 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:
- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`.
Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only
@ -195,6 +197,9 @@ or time-series database to Prometheus. To do so, the user must first convert the
source data into [OpenMetrics](https://openmetrics.io/) format, which is the
input format for the backfilling as described below.
Note that native histograms and staleness markers are not supported by this
procedure, as they cannot be represented in the OpenMetrics format.
### Usage
Backfilling can be used via the Promtool command line. Promtool will write the blocks

View file

@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
// since the service may be registered remotely through a different node.
var addr string
if node.ServiceAddress != "" {
addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort))
addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
} else {
addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort))
addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
}
target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}

View file

@ -10,7 +10,7 @@ require (
github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/common v0.53.0
github.com/prometheus/prometheus v0.51.1
github.com/prometheus/prometheus v0.51.2
github.com/stretchr/testify v1.9.0
)

View file

@ -279,8 +279,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.51.1 h1:V2e7x2oiUC0Megp26+xjffxBf9EGkyP1iQuGd4VjUSU=
github.com/prometheus/prometheus v0.51.1/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
github.com/prometheus/prometheus v0.51.2 h1:U0faf1nT4CB9DkBW87XLJCBi2s8nwWXdTbyzRUAkX0w=
github.com/prometheus/prometheus v0.51.2/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM=

View file

@ -44,5 +44,10 @@
// The default refresh time for all dashboards, default to 60s
refresh: '60s',
},
// Opt-out of multi-cluster dashboards by overriding this.
showMultiCluster: true,
// The cluster label to infer the cluster name from.
clusterLabel: 'cluster',
},
}

View file

@ -10,21 +10,32 @@ local template = grafana.template;
{
grafanaDashboards+:: {
'prometheus.json':
g.dashboard(
local showMultiCluster = $._config.showMultiCluster;
local dashboard = g.dashboard(
'%(prefix)sOverview' % $._config.grafanaPrometheus
)
.addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'cluster')
.addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
.addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
);
local templatedDashboard = if showMultiCluster then
dashboard
.addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
.addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
.addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
else
dashboard
.addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
.addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
templatedDashboard
.addRow(
g.row('Prometheus Stats')
.addPanel(
g.panel('Prometheus Stats') +
g.tablePanel([
g.tablePanel(if showMultiCluster then [
'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
] else [
'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
], {
cluster: { alias: 'Cluster' },
cluster: { alias: if showMultiCluster then 'Cluster' else '' },
job: { alias: 'Job' },
instance: { alias: 'Instance' },
version: { alias: 'Version' },
@ -37,12 +48,18 @@ local template = grafana.template;
g.row('Discovery')
.addPanel(
g.panel('Target Sync') +
g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3', '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}') +
g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
else '{{scrape_job}}') +
{ yaxes: g.yaxes('ms') }
)
.addPanel(
g.panel('Targets') +
g.queryPanel('sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})', '{{cluster}}:{{job}}:{{instance}}') +
g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
else 'Targets') +
g.stack
)
)
@ -50,29 +67,47 @@ local template = grafana.template;
g.row('Retrieval')
.addPanel(
g.panel('Average Scrape Interval Duration') +
g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{cluster}}:{{job}}:{{instance}} {{interval}} configured') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
else '{{interval}} configured') +
{ yaxes: g.yaxes('ms') }
)
.addPanel(
g.panel('Scrape failures') +
g.queryPanel([
g.queryPanel(if showMultiCluster then [
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
], [
] else [
'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
], if showMultiCluster then [
'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
'out of bounds: {{cluster}} {{job}} {{instance}}',
'out of order: {{cluster}} {{job}} {{instance}}',
] else [
'exceeded body size limit: {{job}}',
'exceeded sample limit: {{job}}',
'duplicate timestamp: {{job}}',
'out of bounds: {{job}}',
'out of order: {{job}}',
]) +
g.stack
)
.addPanel(
g.panel('Appended Samples') +
g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])', '{{cluster}} {{job}} {{instance}}') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack
)
)
@ -80,12 +115,18 @@ local template = grafana.template;
g.row('Storage')
.addPanel(
g.panel('Head Series') +
g.queryPanel('prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head series') +
g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
else '{{job}} {{instance}} head series') +
g.stack
)
.addPanel(
g.panel('Head Chunks') +
g.queryPanel('prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head chunks') +
g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
else '{{job}} {{instance}} head chunks') +
g.stack
)
)
@ -93,12 +134,18 @@ local template = grafana.template;
g.row('Query')
.addPanel(
g.panel('Query Rate') +
g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{cluster}} {{job}} {{instance}}') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack,
)
.addPanel(
g.panel('Stage Duration') +
g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') +
g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
if showMultiCluster then '{{slice}}'
else '{{slice}}') +
{ yaxes: g.yaxes('ms') } +
g.stack,
)

go.mod (14 changed lines)
View file

@ -41,7 +41,7 @@ require (
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.8
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.32.0
github.com/linode/linodego v1.33.0
github.com/miekg/dns v1.1.59
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
@ -60,7 +60,6 @@ require (
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/featuregate v1.5.0
go.opentelemetry.io/collector/pdata v1.5.0
go.opentelemetry.io/collector/semconv v0.98.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0
@ -80,10 +79,10 @@ require (
golang.org/x/sys v0.19.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.20.0
google.golang.org/api v0.174.0
google.golang.org/api v0.177.0
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
google.golang.org/protobuf v1.34.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3
@ -94,8 +93,8 @@ require (
)
require (
cloud.google.com/go/auth v0.2.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.0 // indirect
cloud.google.com/go/auth v0.3.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
@ -151,7 +150,6 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
@ -191,7 +189,7 @@ require (
golang.org/x/mod v0.17.0 // indirect
golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect

go.sum (26 changed lines)
View file

@ -12,10 +12,10 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.2.0 h1:y6oTcpMSbOcXbwYgUUrvI+mrQ2xbrcdpPgtVbCGTLTk=
cloud.google.com/go/auth v0.2.0/go.mod h1:+yb+oy3/P0geX6DLKlqiGHARGR6EX2GRtYCzWOCQSbU=
cloud.google.com/go/auth/oauth2adapt v0.2.0 h1:FR8zevgQwu+8CqiOT5r6xCmJa3pJC/wdXEEPF1OkNhA=
cloud.google.com/go/auth/oauth2adapt v0.2.0/go.mod h1:AfqujpDAlTfLfeCIl/HJZZlIxD8+nJoZ5e0x1IxGq5k=
cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs=
cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@ -471,8 +471,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.32.0 h1:OmZzB3iON6uu84VtLFf64uKmAQqJJarvmsVguroioPI=
github.com/linode/linodego v1.32.0/go.mod h1:y8GDP9uLVH4jTB9qyrgw79qfKdYJmNCGUOJmfuiOcmI=
github.com/linode/linodego v1.33.0 h1:cX2FYry7r6CA1ujBMsdqiM4VhvIQtnWsOuVblzfBhCw=
github.com/linode/linodego v1.33.0/go.mod h1:dSJJgIwqZCF5wnpuC6w5cyIbRtcexAm7uVvuJopGB40=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@ -722,8 +722,6 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/featuregate v1.5.0 h1:uK8qnYQKz1TMkK+FDTFsywg/EybW/gbnOUaPNUkRznM=
go.opentelemetry.io/collector/featuregate v1.5.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
go.opentelemetry.io/collector/pdata v1.5.0 h1:1fKTmUpr0xCOhP/B0VEvtz7bYPQ45luQ8XFyA07j8LE=
go.opentelemetry.io/collector/pdata v1.5.0/go.mod h1:TYj8aKRWZyT/KuKQXKyqSEvK/GV+slFaDMEI+Ke64Yw=
go.opentelemetry.io/collector/semconv v0.98.0 h1:zO4L4TmlxXoYu8UgPeYElGY19BW7wPjM+quL5CzoOoY=
@ -1045,8 +1043,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.174.0 h1:zB1BWl7ocxfTea2aQ9mgdzXjnfPySllpPOskdnO+q34=
google.golang.org/api v0.174.0/go.mod h1:aC7tB6j0HR1Nl0ni5ghpx6iLasmAX78Zkh/wgxAAjLg=
google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk=
google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1085,8 +1083,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU=
google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be h1:LG9vZxsWGOmUKieR8wPAUR3u3MpnYFQZROPIMaXh7/A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240415180920-8c6c420018be/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1118,8 +1116,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View file

@ -14,9 +14,9 @@
package histogram
import (
"fmt"
"math"
"math/rand"
"strconv"
"testing"
"github.com/stretchr/testify/require"
@ -2134,7 +2134,7 @@ func TestAllFloatBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
var expBuckets, actBuckets []Bucket[float64]
if c.includeNeg {
@ -2360,7 +2360,7 @@ func TestAllReverseFloatBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
var expBuckets, actBuckets []Bucket[float64]
if c.includePos {

View file

@ -14,8 +14,8 @@
package histogram
import (
"fmt"
"math"
"strconv"
"testing"
"github.com/stretchr/testify/require"
@ -72,7 +72,7 @@ func TestHistogramString(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
actualString := c.histogram.String()
require.Equal(t, c.expectedString, actualString)
})
@ -211,7 +211,7 @@ func TestCumulativeBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.CumulativeBucketIterator()
actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
for it.Next() {
@ -371,7 +371,7 @@ func TestRegularBucketIterator(t *testing.T) {
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.PositiveBucketIterator()
actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
for it.Next() {

View file

@ -17,6 +17,7 @@ import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"testing"
@ -732,7 +733,7 @@ func TestScratchBuilder(t *testing.T) {
want: FromStrings("ddd", "444"),
},
} {
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
b := NewScratchBuilder(len(tcase.add))
for _, lbl := range tcase.add {
b.Add(lbl.Name, lbl.Value)

View file

@ -14,7 +14,8 @@
package labels
import (
"fmt"
"bytes"
"strconv"
)
// MatchType is an enum for label matching types.
@ -78,7 +79,29 @@ func MustNewMatcher(mt MatchType, name, val string) *Matcher {
}
func (m *Matcher) String() string {
return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
// Start a buffer with a pre-allocated size on the stack to cover most needs.
var bytea [1024]byte
b := bytes.NewBuffer(bytea[:0])
if m.shouldQuoteName() {
b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name))
} else {
b.WriteString(m.Name)
}
b.WriteString(m.Type.String())
b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value))
return b.String()
}
func (m *Matcher) shouldQuoteName() bool {
for i, c := range m.Name {
if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
continue
}
return true
}
return false
}
// Matches returns whether the matcher matches the given string value.
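
The new `String()` keeps the fast path for classic label names and quotes anything else, following how `shouldQuoteName` walks the name. A standalone sketch of the same quoting rule (the names below are illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

// shouldQuote reports whether a label name needs quoting, using the same
// character classes as Matcher.shouldQuoteName above: bare names must match
// the classic [a-zA-Z_][a-zA-Z0-9_]* pattern.
func shouldQuote(name string) bool {
	for i, c := range name {
		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
			continue
		}
		return true
	}
	return false
}

func main() {
	for _, name := range []string{"job", "job_1", "1job", "service.name", `"quoted"`} {
		out := name
		if shouldQuote(name) {
			out = strconv.Quote(name)
		}
		fmt.Printf("%s=%s\n", out, strconv.Quote("val"))
	}
}
```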

View file

@ -15,6 +15,7 @@ package labels
import (
"fmt"
"math/rand"
"testing"
"github.com/stretchr/testify/require"
@ -225,3 +226,128 @@ func BenchmarkNewMatcher(b *testing.B) {
}
})
}
func BenchmarkMatcher_String(b *testing.B) {
type benchCase struct {
name string
matchers []*Matcher
}
cases := []benchCase{
{
name: "short name equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "foo", "bar"),
MustNewMatcher(MatchEqual, "bar", "baz"),
MustNewMatcher(MatchEqual, "abc", "def"),
MustNewMatcher(MatchEqual, "ghi", "klm"),
MustNewMatcher(MatchEqual, "nop", "qrs"),
},
},
{
name: "short quoted name not equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "f.o", "bar"),
MustNewMatcher(MatchEqual, "b.r", "baz"),
MustNewMatcher(MatchEqual, "a.c", "def"),
MustNewMatcher(MatchEqual, "g.i", "klm"),
MustNewMatcher(MatchEqual, "n.p", "qrs"),
},
},
{
name: "short quoted name with quotes not equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, `"foo"`, "bar"),
MustNewMatcher(MatchEqual, `"foo"`, "baz"),
MustNewMatcher(MatchEqual, `"foo"`, "def"),
MustNewMatcher(MatchEqual, `"foo"`, "klm"),
MustNewMatcher(MatchEqual, `"foo"`, "qrs"),
},
},
{
name: "short name value with quotes equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "foo", `"bar"`),
MustNewMatcher(MatchEqual, "bar", `"baz"`),
MustNewMatcher(MatchEqual, "abc", `"def"`),
MustNewMatcher(MatchEqual, "ghi", `"klm"`),
MustNewMatcher(MatchEqual, "nop", `"qrs"`),
},
},
{
name: "short name and long value regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "foo", "five_six_seven_eight_nine_ten_one_two_three_four"),
MustNewMatcher(MatchRegexp, "bar", "one_two_three_four_five_six_seven_eight_nine_ten"),
MustNewMatcher(MatchRegexp, "abc", "two_three_four_five_six_seven_eight_nine_ten_one"),
MustNewMatcher(MatchRegexp, "ghi", "three_four_five_six_seven_eight_nine_ten_one_two"),
MustNewMatcher(MatchRegexp, "nop", "four_five_six_seven_eight_nine_ten_one_two_three"),
},
},
{
name: "short name and long value with quotes equal",
matchers: []*Matcher{
MustNewMatcher(MatchEqual, "foo", `five_six_seven_eight_nine_ten_"one"_two_three_four`),
MustNewMatcher(MatchEqual, "bar", `one_two_three_four_five_six_"seven"_eight_nine_ten`),
MustNewMatcher(MatchEqual, "abc", `two_three_four_five_six_seven_"eight"_nine_ten_one`),
MustNewMatcher(MatchEqual, "ghi", `three_four_five_six_seven_eight_"nine"_ten_one_two`),
MustNewMatcher(MatchEqual, "nop", `four_five_six_seven_eight_nine_"ten"_one_two_three`),
},
},
{
name: "long name regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "val"),
MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "val"),
MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "val"),
MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "val"),
MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "val"),
},
},
{
name: "long quoted name regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "val"),
MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "val"),
MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "val"),
MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "val"),
MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "val"),
},
},
{
name: "long name and long value regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one_two_three_four_five_six_seven_eight_nine_ten", "five_six_seven_eight_nine_ten_one_two_three_four"),
MustNewMatcher(MatchRegexp, "two_three_four_five_six_seven_eight_nine_ten_one", "one_two_three_four_five_six_seven_eight_nine_ten"),
MustNewMatcher(MatchRegexp, "three_four_five_six_seven_eight_nine_ten_one_two", "two_three_four_five_six_seven_eight_nine_ten_one"),
MustNewMatcher(MatchRegexp, "four_five_six_seven_eight_nine_ten_one_two_three", "three_four_five_six_seven_eight_nine_ten_one_two"),
MustNewMatcher(MatchRegexp, "five_six_seven_eight_nine_ten_one_two_three_four", "four_five_six_seven_eight_nine_ten_one_two_three"),
},
},
{
name: "long quoted name and long value regexp",
matchers: []*Matcher{
MustNewMatcher(MatchRegexp, "one.two.three.four.five.six.seven.eight.nine.ten", "five.six.seven.eight.nine.ten.one.two.three.four"),
MustNewMatcher(MatchRegexp, "two.three.four.five.six.seven.eight.nine.ten.one", "one.two.three.four.five.six.seven.eight.nine.ten"),
MustNewMatcher(MatchRegexp, "three.four.five.six.seven.eight.nine.ten.one.two", "two.three.four.five.six.seven.eight.nine.ten.one"),
MustNewMatcher(MatchRegexp, "four.five.six.seven.eight.nine.ten.one.two.three", "three.four.five.six.seven.eight.nine.ten.one.two"),
MustNewMatcher(MatchRegexp, "five.six.seven.eight.nine.ten.one.two.three.four", "four.five.six.seven.eight.nine.ten.one.two.three"),
},
},
}
var mixed []*Matcher
for _, bc := range cases {
mixed = append(mixed, bc.matchers...)
}
rand.Shuffle(len(mixed), func(i, j int) { mixed[i], mixed[j] = mixed[j], mixed[i] })
cases = append(cases, benchCase{name: "mixed", matchers: mixed})
for _, bc := range cases {
b.Run(bc.name, func(b *testing.B) {
for i := 0; i < b.N; i++ {
m := bc.matchers[i%len(bc.matchers)]
_ = m.String()
}
})
}
}

View file

@ -16,6 +16,7 @@ package labels
import (
"slices"
"strings"
"unicode/utf8"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
@ -827,8 +828,12 @@ type zeroOrOneCharacterStringMatcher struct {
}
func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
// Zero or one.
if len(s) > 1 {
// If there's more than one rune in the string, then it can't match.
if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
// Size is 0 for empty strings, 1 for invalid rune.
// Empty string matches, invalid rune matches if there isn't anything else.
return size == len(s)
} else if size < len(s) {
return false
}
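
`utf8.DecodeRuneInString` returns size 0 for an empty string and size 1 for a byte that is not valid UTF-8, so the zero-or-one check (ignoring the `matchNL` newline handling that follows in the method) collapses to comparing the first rune's size with the string length. A sketch under that simplification:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// matchesZeroOrOne reports whether s is zero or one rune long, treating a
// single invalid UTF-8 byte as one "character", like the matcher above.
// The matchNL option is deliberately ignored in this sketch.
func matchesZeroOrOne(s string) bool {
	_, size := utf8.DecodeRuneInString(s) // size is 0 for "", 1 for an invalid byte.
	return size == len(s)
}

func main() {
	for _, s := range []string{"", "x", "\n", "😀", "😀x", "xx", "\xff", "\xff\xfe"} {
		fmt.Printf("%q -> %v\n", s, matchesZeroOrOne(s))
	}
}
```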

View file

@ -19,6 +19,7 @@ import (
"strings"
"testing"
"time"
"unicode/utf8"
"github.com/grafana/regexp"
"github.com/grafana/regexp/syntax"
@ -36,6 +37,7 @@ var (
".*foo",
"^.*foo$",
"^.+foo$",
".?",
".*",
".+",
"foo.+",
@ -84,10 +86,16 @@ var (
"foo", " foo bar", "bar", "buzz\nbar", "bar foo", "bfoo", "\n", "\nfoo", "foo\n", "hello foo world", "hello foo\n world", "",
"FOO", "Foo", "OO", "Oo", "\nfoo\n", strings.Repeat("f", 20), "prometheus", "prometheus_api_v1", "prometheus_api_v1_foo",
"10.0.1.20", "10.0.2.10", "10.0.3.30", "10.0.4.40",
"foofoo0", "foofoo",
"foofoo0", "foofoo", "😀foo0",
// Values matching / not matching the test regexps on long alternations.
"zQPbMkNO", "zQPbMkNo", "jyyfj00j0061", "jyyfj00j006", "jyyfj00j00612", "NNSPdvMi", "NNSPdvMiXXX", "NNSPdvMixxx", "nnSPdvMi", "nnSPdvMiXXX",
// Invalid utf8
"\xfefoo",
"foo\xfe",
"\xfd",
"\xff\xff",
}
)
@ -926,19 +934,91 @@ func BenchmarkOptimizeEqualStringMatchers(b *testing.B) {
}
func TestZeroOrOneCharacterStringMatcher(t *testing.T) {
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.True(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
t.Run("match newline", func(t *testing.T) {
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.True(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
})
matcher = &zeroOrOneCharacterStringMatcher{matchNL: false}
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.False(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
t.Run("do not match newline", func(t *testing.T) {
matcher := &zeroOrOneCharacterStringMatcher{matchNL: false}
require.True(t, matcher.Matches(""))
require.True(t, matcher.Matches("x"))
require.False(t, matcher.Matches("\n"))
require.False(t, matcher.Matches("xx"))
require.False(t, matcher.Matches("\n\n"))
})
t.Run("unicode", func(t *testing.T) {
// Just for documentation purposes, emoji1 is 1 rune, emoji2 is 2 runes.
// Keeping this in mind will make it easier for future readers to fix these tests.
emoji1 := "😀"
emoji2 := "❤️"
require.Equal(t, 1, utf8.RuneCountInString(emoji1))
require.Equal(t, 2, utf8.RuneCountInString(emoji2))
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
require.True(t, matcher.Matches(emoji1))
require.False(t, matcher.Matches(emoji2))
require.False(t, matcher.Matches(emoji1+emoji1))
require.False(t, matcher.Matches("x"+emoji1))
require.False(t, matcher.Matches(emoji1+"x"))
require.False(t, matcher.Matches(emoji1+emoji2))
})
t.Run("invalid unicode", func(t *testing.T) {
// Just for reference, we also compare to what the `^.?$` regular expression matches.
re := regexp.MustCompile("^.?$")
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
requireMatches := func(s string, expected bool) {
t.Helper()
require.Equal(t, expected, matcher.Matches(s))
require.Equal(t, re.MatchString(s), matcher.Matches(s))
}
requireMatches("\xff", true)
requireMatches("x\xff", false)
requireMatches("\xffx", false)
requireMatches("\xff\xfe", false)
})
}
func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) {
type benchCase struct {
str string
matches bool
}
emoji1 := "😀"
emoji2 := "❤️"
cases := []benchCase{
{"", true},
{"x", true},
{"\n", true},
{"xx", false},
{"\n\n", false},
{emoji1, true},
{emoji2, false},
{emoji1 + emoji1, false},
{strings.Repeat("x", 100), false},
{strings.Repeat(emoji1, 100), false},
{strings.Repeat(emoji2, 100), false},
}
matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
b.ResetTimer()
for n := 0; n < b.N; n++ {
c := cases[n%len(cases)]
got := matcher.Matches(c.str)
if got != c.matches {
b.Fatalf("unexpected result for %q: got %t, want %t", c.str, got, c.matches)
}
}
}
func TestLiteralPrefixStringMatcher(t *testing.T) {

View file

@ -17,6 +17,7 @@ import (
"crypto/md5"
"encoding/binary"
"fmt"
"strconv"
"strings"
"github.com/grafana/regexp"
@ -48,7 +49,7 @@ const (
Drop Action = "drop"
// KeepEqual drops targets for which the input does not match the target.
KeepEqual Action = "keepequal"
// Drop drops targets for which the input does match the target.
// DropEqual drops targets for which the input does match the target.
DropEqual Action = "dropequal"
// HashMod sets a label to the modulus of a hash of labels.
HashMod Action = "hashmod"
@ -290,7 +291,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
hash := md5.Sum([]byte(val))
// Use only the last 8 bytes of the hash to give the same result as earlier versions of this code.
mod := binary.BigEndian.Uint64(hash[8:]) % cfg.Modulus
lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
lb.Set(cfg.TargetLabel, strconv.FormatUint(mod, 10))
case LabelMap:
lb.Range(func(l labels.Label) {
if cfg.Regex.MatchString(l.Name) {
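
For the `HashMod` branch above, `strconv.FormatUint(mod, 10)` prints the same digits as the old `fmt.Sprintf("%d", mod)` without going through `fmt`. The computation in isolation (the input value and modulus here are illustrative):

```go
package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
	"strconv"
)

// hashmod reproduces the relabel step above: md5 the concatenated source
// label values, keep only the low 8 bytes of the digest, and take the
// remainder modulo the configured modulus.
func hashmod(val string, modulus uint64) string {
	hash := md5.Sum([]byte(val))
	mod := binary.BigEndian.Uint64(hash[8:]) % modulus
	return strconv.FormatUint(mod, 10)
}

func main() {
	fmt.Println(hashmod("instance-1;instance-2", 8))
}
```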

View file

@ -14,7 +14,7 @@
package relabel
import (
"fmt"
"strconv"
"testing"
"github.com/prometheus/common/model"
@ -657,7 +657,7 @@ func TestRelabelValidate(t *testing.T) {
},
}
for i, test := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
err := test.config.Validate()
if test.expected == "" {
require.NoError(t, err)

View file

@ -136,10 +136,11 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
type RuleGroup struct {
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
Name string `yaml:"name"`
Interval model.Duration `yaml:"interval,omitempty"`
QueryOffset *model.Duration `yaml:"query_offset,omitempty"`
Limit int `yaml:"limit,omitempty"`
Rules []RuleNode `yaml:"rules"`
}
// Rule describes an alerting or recording rule.

View file

@ -74,7 +74,7 @@ func TestHandlerNextBatch(t *testing.T) {
for i := range make([]struct{}, 2*maxBatchSize+1) {
h.queue = append(h.queue, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -186,10 +186,10 @@ func TestHandlerSendAll(t *testing.T) {
for i := range make([]struct{}, maxBatchSize) {
h.queue = append(h.queue, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
expected = append(expected, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -297,23 +297,23 @@ func TestHandlerSendAllRemapPerAm(t *testing.T) {
for i := range make([]struct{}, maxBatchSize/2) {
h.queue = append(h.queue,
&Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
},
&Alert{
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
},
)
expected1 = append(expected1,
&Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
}, &Alert{
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", "test", "alertnamedrop", strconv.Itoa(i)),
},
)
expected2 = append(expected2, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -502,7 +502,7 @@ func TestHandlerQueuing(t *testing.T) {
var alerts []*Alert
for i := range make([]struct{}, 20*maxBatchSize) {
alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}
@ -762,7 +762,7 @@ func TestHangingNotifier(t *testing.T) {
var alerts []*Alert
for i := range make([]struct{}, 20*maxBatchSize) {
alerts = append(alerts, &Alert{
Labels: labels.FromStrings("alertname", fmt.Sprintf("%d", i)),
Labels: labels.FromStrings("alertname", strconv.Itoa(i)),
})
}

View file

@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
package promql_test
import (
"context"
@ -23,13 +23,14 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
"github.com/prometheus/prometheus/util/teststorage"
)
func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *Engine, interval, numIntervals int) error {
func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, interval, numIntervals int) error {
ctx := context.Background()
metrics := []labels.Labels{}
@ -249,13 +250,13 @@ func BenchmarkRangeQuery(b *testing.B) {
stor := teststorage.New(b)
stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings.
defer stor.Close()
opts := EngineOpts{
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
engine := NewEngine(opts)
engine := promql.NewEngine(opts)
const interval = 10000 // 10s interval.
// A day of data plus 10k steps.
@ -324,7 +325,7 @@ func BenchmarkNativeHistograms(b *testing.B) {
},
}
opts := EngineOpts{
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
@ -338,7 +339,7 @@ func BenchmarkNativeHistograms(b *testing.B) {
for _, tc := range cases {
b.Run(tc.name, func(b *testing.B) {
ng := NewEngine(opts)
ng := promql.NewEngine(opts)
for i := 0; i < b.N; i++ {
qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
if err != nil {

View file

@ -573,7 +573,8 @@ func (ng *Engine) validateOpts(expr parser.Expr) error {
return validationErr
}
func (ng *Engine) newTestQuery(f func(context.Context) error) Query {
// NewTestQuery injects special behaviour into Query for testing.
func (ng *Engine) NewTestQuery(f func(context.Context) error) Query {
qry := &query{
q: "test statement",
stmt: parser.TestStmt(f),
@ -2024,25 +2025,21 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(vs *parser.Vec
vec := make(Vector, 0, len(vs.Series))
for i, s := range vs.Series {
it := seriesIterators[i]
t, f, h, ok := ev.vectorSelectorSingle(it, vs, enh.Ts)
if ok {
vec = append(vec, Sample{
Metric: s.Labels(),
T: t,
F: f,
H: h,
})
histSize := 0
if h != nil {
histSize := h.Size() / 16 // 16 bytes per sample.
ev.currentSamples += histSize
}
ev.currentSamples++
t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts)
if !ok {
continue
}
ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, int64(1+histSize))
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
// Note that we ignore the sample values because the call only cares about the timestamp.
vec = append(vec, Sample{
Metric: s.Labels(),
T: t,
})
ev.currentSamples++
ev.samplesStats.IncrementSamplesAtTimestamp(enh.Ts, 1)
if ev.currentSamples > ev.maxSamples {
ev.error(ErrTooManySamples(env))
}
}
ev.samplesStats.UpdatePeak(ev.currentSamples)

View file

@ -0,0 +1,82 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
import (
"errors"
"testing"
"github.com/go-kit/log"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/util/annotations"
)
func TestRecoverEvaluatorRuntime(t *testing.T) {
var output []interface{}
logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error {
output = append(output, keyvals...)
return nil
}))
ev := &evaluator{logger: logger}
expr, _ := parser.ParseExpr("sum(up)")
var err error
defer func() {
require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0")
require.Contains(t, output, "sum(up)")
}()
defer ev.recover(expr, nil, &err)
// Cause a runtime panic.
var a []int
a[123] = 1
}
func TestRecoverEvaluatorError(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()}
var err error
e := errors.New("custom error")
defer func() {
require.EqualError(t, err, e.Error())
}()
defer ev.recover(nil, nil, &err)
panic(e)
}
func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()}
var err error
var ws annotations.Annotations
warnings := annotations.New().Add(errors.New("custom warning"))
e := errWithWarnings{
err: errors.New("custom error"),
warnings: warnings,
}
defer func() {
require.EqualError(t, err, e.Error())
require.Equal(t, warnings, ws, "wrong warning message")
}()
defer ev.recover(nil, &ws, &err)
panic(e)
}

File diff suppressed because it is too large.

View file

@@ -948,15 +948,6 @@ func funcTimestamp(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe
return enh.Out, nil
}
func kahanSum(samples []float64) float64 {
var sum, c float64
for _, v := range samples {
sum, c = kahanSumInc(v, sum, c)
}
return sum + c
}
func kahanSumInc(inc, sum, c float64) (newSum, newC float64) {
t := sum + inc
// Using Neumaier improvement, swap if next term larger than sum.
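The hunk cuts off before the compensation step; a sketch of the assumed continuation, following the Neumaier formulation the comment describes (whichever operand is smaller in magnitude loses low-order bits in sum + inc, and those bits are folded into the carry c):

    if math.Abs(sum) >= math.Abs(inc) {
        c += (sum - t) + inc // inc's low-order bits were lost in t; recover them.
    } else {
        c += (inc - t) + sum // sum was the smaller term; recover its lost bits.
    }
    return t, c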

View file

@@ -11,11 +11,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
package promql_test
import (
"context"
"math"
"testing"
"time"
@@ -23,6 +22,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/util/teststorage"
)
@@ -33,13 +33,13 @@ func TestDeriv(t *testing.T) {
// so we test it by hand.
storage := teststorage.New(t)
defer storage.Close()
opts := EngineOpts{
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10000,
Timeout: 10 * time.Second,
}
engine := NewEngine(opts)
engine := promql.NewEngine(opts)
a := storage.Appender(context.Background())
@@ -70,19 +70,13 @@ func TestDeriv(t *testing.T) {
func TestFunctionList(t *testing.T) {
// Test that Functions and parser.Functions list the same functions.
for i := range FunctionCalls {
for i := range promql.FunctionCalls {
_, ok := parser.Functions[i]
require.True(t, ok, "function %s exists in promql package, but not in parser package", i)
}
for i := range parser.Functions {
_, ok := FunctionCalls[i]
_, ok := promql.FunctionCalls[i]
require.True(t, ok, "function %s exists in parser package, but not in promql package", i)
}
}
func TestKahanSum(t *testing.T) {
vals := []float64{1.0, math.Pow(10, 100), 1.0, -1 * math.Pow(10, 100)}
expected := 2.0
require.Equal(t, expected, kahanSum(vals))
}

View file

@@ -204,8 +204,8 @@ func (node *VectorSelector) String() string {
labelStrings = make([]string, 0, len(node.LabelMatchers)-1)
}
for _, matcher := range node.LabelMatchers {
// Only include the __name__ label if its equality matching and matches the name.
if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name {
// Only include the __name__ label if it's equality matching and matches the name, but don't skip if it's an explicit empty name matcher.
if matcher.Name == labels.MetricName && matcher.Type == labels.MatchEqual && matcher.Value == node.Name && matcher.Value != "" {
continue
}
labelStrings = append(labelStrings, matcher.String())
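The effect of the added matcher.Value != "" guard is that an explicitly empty name matcher now survives stringification instead of being silently dropped. A hypothetical illustration (assuming the usual parser and labels imports):

    vs := &parser.VectorSelector{ // Name is empty, so the old code skipped the first matcher below.
        LabelMatchers: []*labels.Matcher{
            labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, ""),
            labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
        },
    }
    fmt.Println(vs.String()) // {__name__="",a="x"}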

View file

@@ -135,6 +135,19 @@ func TestExprString(t *testing.T) {
{
in: `a[1m] @ end()`,
},
{
in: `{__name__="",a="x"}`,
},
{
in: `{"a.b"="c"}`,
},
{
in: `{"0"="1"}`,
},
{
in: `{"_0"="1"}`,
out: `{_0="1"}`,
},
}
for _, test := range inputs {
@@ -216,6 +229,16 @@ func TestVectorSelector_String(t *testing.T) {
},
expected: `{__name__="foobar"}`,
},
{
name: "empty name matcher",
vs: VectorSelector{
LabelMatchers: []*labels.Matcher{
labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, ""),
labels.MustNewMatcher(labels.MatchEqual, "a", "x"),
},
},
expected: `{__name__="",a="x"}`,
},
} {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expected, tc.vs.String())

View file

@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
package promql_test
import (
"context"
@@ -22,37 +22,30 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/util/teststorage"
)
func newTestEngine() *Engine {
return NewEngine(EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10000,
Timeout: 100 * time.Second,
NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
EnableAtModifier: true,
EnableNegativeOffset: true,
EnablePerStepStats: true,
})
func newTestEngine() *promql.Engine {
return promqltest.NewTestEngine(false, 0, promqltest.DefaultMaxSamplesPerQuery)
}
func TestEvaluations(t *testing.T) {
RunBuiltinTests(t, newTestEngine())
promqltest.RunBuiltinTests(t, newTestEngine())
}
// Run a lot of queries at the same time, to check for race conditions.
func TestConcurrentRangeQueries(t *testing.T) {
stor := teststorage.New(t)
defer stor.Close()
opts := EngineOpts{
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 50000000,
Timeout: 100 * time.Second,
}
engine := NewEngine(opts)
engine := promql.NewEngine(opts)
const interval = 10000 // 10s interval.
// A day of data plus 10k steps.

View file

@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
package promqltest
import (
"context"
@@ -19,7 +19,6 @@ import (
"errors"
"fmt"
"io/fs"
"math"
"strconv"
"strings"
"testing"
@@ -33,16 +32,16 @@ import (
"github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/parser/posrange"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/util/almost"
"github.com/prometheus/prometheus/util/teststorage"
"github.com/prometheus/prometheus/util/testutil"
)
var (
minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
patSpace = regexp.MustCompile("[\t ]+")
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
@@ -50,7 +49,8 @@ var (
)
const (
defaultEpsilon = 0.000001 // Relative error allowed for sample values.
defaultEpsilon = 0.000001 // Relative error allowed for sample values.
DefaultMaxSamplesPerQuery = 10000
)
var testStartTime = time.Unix(0, 0).UTC()
@@ -72,8 +72,22 @@ func LoadedStorage(t testutil.T, input string) *teststorage.TestStorage {
return test.storage
}
func NewTestEngine(enablePerStepStats bool, lookbackDelta time.Duration, maxSamples int) *promql.Engine {
return promql.NewEngine(promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: maxSamples,
Timeout: 100 * time.Second,
NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) },
EnableAtModifier: true,
EnableNegativeOffset: true,
EnablePerStepStats: enablePerStepStats,
LookbackDelta: lookbackDelta,
})
}
// RunBuiltinTests runs an acceptance test suite against the provided engine.
func RunBuiltinTests(t *testing.T, engine QueryEngine) {
func RunBuiltinTests(t *testing.T, engine promql.QueryEngine) {
t.Cleanup(func() { parser.EnableExperimentalFunctions = false })
parser.EnableExperimentalFunctions = true
@@ -90,11 +104,11 @@ func RunBuiltinTests(t *testing.T, engine QueryEngine) {
}
// RunTest parses and runs the test against the provided engine.
func RunTest(t testutil.T, input string, engine QueryEngine) {
func RunTest(t testutil.T, input string, engine promql.QueryEngine) {
require.NoError(t, runTest(t, input, engine))
}
func runTest(t testutil.T, input string, engine QueryEngine) error {
func runTest(t testutil.T, input string, engine promql.QueryEngine) error {
test, err := newTest(t, input)
// Why do this before checking err? newTest() can create the test storage and then return an error,
@@ -368,7 +382,7 @@ func (*evalCmd) testCmd() {}
type loadCmd struct {
gap time.Duration
metrics map[uint64]labels.Labels
defs map[uint64][]Sample
defs map[uint64][]promql.Sample
exemplars map[uint64][]exemplar.Exemplar
}
@@ -376,7 +390,7 @@ func newLoadCmd(gap time.Duration) *loadCmd {
return &loadCmd{
gap: gap,
metrics: map[uint64]labels.Labels{},
defs: map[uint64][]Sample{},
defs: map[uint64][]promql.Sample{},
exemplars: map[uint64][]exemplar.Exemplar{},
}
}
@@ -389,11 +403,11 @@ func (cmd loadCmd) String() string {
func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
h := m.Hash()
samples := make([]Sample, 0, len(vals))
samples := make([]promql.Sample, 0, len(vals))
ts := testStartTime
for _, v := range vals {
if !v.Omitted {
samples = append(samples, Sample{
samples = append(samples, promql.Sample{
T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
F: v.Value,
H: v.Histogram,
@@ -419,7 +433,7 @@ func (cmd *loadCmd) append(a storage.Appender) error {
return nil
}
func appendSample(a storage.Appender, s Sample, m labels.Labels) error {
func appendSample(a storage.Appender, s promql.Sample, m labels.Labels) error {
if s.H != nil {
if _, err := a.AppendHistogram(0, m, s.T, nil, s.H); err != nil {
return err
@@ -503,7 +517,7 @@ func (ev *evalCmd) expectMetric(pos int, m labels.Labels, vals ...parser.Sequenc
// compareResult compares the result value with the defined expectation.
func (ev *evalCmd) compareResult(result parser.Value) error {
switch val := result.(type) {
case Matrix:
case promql.Matrix:
if ev.ordered {
return fmt.Errorf("expected ordered result, but query returned a matrix")
}
@@ -521,8 +535,8 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
seen[hash] = true
exp := ev.expected[hash]
var expectedFloats []FPoint
var expectedHistograms []HPoint
var expectedFloats []promql.FPoint
var expectedHistograms []promql.HPoint
for i, e := range exp.vals {
ts := ev.start.Add(time.Duration(i) * ev.step)
@@ -534,9 +548,9 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
t := ts.UnixNano() / int64(time.Millisecond/time.Nanosecond)
if e.Histogram != nil {
expectedHistograms = append(expectedHistograms, HPoint{T: t, H: e.Histogram})
expectedHistograms = append(expectedHistograms, promql.HPoint{T: t, H: e.Histogram})
} else if !e.Omitted {
expectedFloats = append(expectedFloats, FPoint{T: t, F: e.Value})
expectedFloats = append(expectedFloats, promql.FPoint{T: t, F: e.Value})
}
}
@@ -551,7 +565,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return fmt.Errorf("expected float value at index %v for %s to have timestamp %v, but it had timestamp %v (result has %s)", i, ev.metrics[hash], expected.T, actual.T, formatSeriesResult(s))
}
if !almostEqual(actual.F, expected.F, defaultEpsilon) {
if !almost.Equal(actual.F, expected.F, defaultEpsilon) {
return fmt.Errorf("expected float value at index %v (t=%v) for %s to be %v, but got %v (result has %s)", i, actual.T, ev.metrics[hash], expected.F, actual.F, formatSeriesResult(s))
}
}
@@ -575,7 +589,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
}
}
case Vector:
case promql.Vector:
seen := map[uint64]bool{}
for pos, v := range val {
fp := v.Metric.Hash()
@@ -601,7 +615,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
if expH != nil && !expH.Compact(0).Equals(v.H) {
return fmt.Errorf("expected %v for %s but got %s", HistogramTestExpression(expH), v.Metric, HistogramTestExpression(v.H))
}
if !almostEqual(exp0.Value, v.F, defaultEpsilon) {
if !almost.Equal(exp0.Value, v.F, defaultEpsilon) {
return fmt.Errorf("expected %v for %s but got %v", exp0.Value, v.Metric, v.F)
}
@@ -613,7 +627,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
}
}
case Scalar:
case promql.Scalar:
if len(ev.expected) != 1 {
return fmt.Errorf("expected vector result, but got scalar %s", val.String())
}
@@ -621,7 +635,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
if exp0.Histogram != nil {
return fmt.Errorf("expected Histogram %v but got scalar %s", exp0.Histogram.TestExpression(), val.String())
}
if !almostEqual(exp0.Value, val.V, defaultEpsilon) {
if !almost.Equal(exp0.Value, val.V, defaultEpsilon) {
return fmt.Errorf("expected Scalar %v but got %v", val.V, exp0.Value)
}
@@ -631,7 +645,7 @@ func (ev *evalCmd) compareResult(result parser.Value) error {
return nil
}
func formatSeriesResult(s Series) string {
func formatSeriesResult(s promql.Series) string {
floatPlural := "s"
histogramPlural := "s"
@@ -678,8 +692,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
// If there is a subquery, then the selectors inside it don't get the @ timestamp.
// If any selector already has the @ timestamp set, then it is untouched.
parser.Inspect(expr, func(node parser.Node, path []parser.Node) error {
_, _, subqTs := subqueryTimes(path)
if subqTs != nil {
if hasAtModifier(path) {
// There is a subquery with timestamp in the path,
// hence don't change any timestamps further.
return nil
@@ -701,7 +714,7 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
}
case *parser.Call:
_, ok := AtModifierUnsafeFunctions[n.Func.Name]
_, ok := promql.AtModifierUnsafeFunctions[n.Func.Name]
containsNonStepInvariant = containsNonStepInvariant || ok
}
return nil
@@ -729,8 +742,19 @@ func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCa
return testCases, nil
}
func hasAtModifier(path []parser.Node) bool {
for _, node := range path {
if n, ok := node.(*parser.SubqueryExpr); ok {
if n.Timestamp != nil {
return true
}
}
}
return false
}
// exec processes a single step of the test.
func (t *test) exec(tc testCommand, engine QueryEngine) error {
func (t *test) exec(tc testCommand, engine promql.QueryEngine) error {
switch cmd := tc.(type) {
case *clearCmd:
t.clear()
@@ -755,7 +779,7 @@ func (t *test) exec(tc testCommand, engine QueryEngine) error {
return nil
}
func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
func (t *test) execEval(cmd *evalCmd, engine promql.QueryEngine) error {
if cmd.isRange {
return t.execRangeEval(cmd, engine)
}
@@ -763,7 +787,7 @@ func (t *test) execEval(cmd *evalCmd, engine QueryEngine) error {
return t.execInstantEval(cmd, engine)
}
func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
func (t *test) execRangeEval(cmd *evalCmd, engine promql.QueryEngine) error {
q, err := engine.NewRangeQuery(t.context, t.storage, nil, cmd.expr, cmd.start, cmd.end, cmd.step)
if err != nil {
return fmt.Errorf("error creating range query for %q (line %d): %w", cmd.expr, cmd.line, err)
@@ -788,7 +812,7 @@ func (t *test) execRangeEval(cmd *evalCmd, engine QueryEngine) error {
return nil
}
func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
func (t *test) execInstantEval(cmd *evalCmd, engine promql.QueryEngine) error {
queries, err := atModifierTestCases(cmd.expr, cmd.start)
if err != nil {
return err
@@ -830,29 +854,29 @@ func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
// Range queries are always sorted by labels, so skip this test case that expects results in a particular order.
continue
}
mat := rangeRes.Value.(Matrix)
mat := rangeRes.Value.(promql.Matrix)
if err := assertMatrixSorted(mat); err != nil {
return err
}
vec := make(Vector, 0, len(mat))
vec := make(promql.Vector, 0, len(mat))
for _, series := range mat {
// We expect either Floats or Histograms.
for _, point := range series.Floats {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, F: point.F})
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, F: point.F})
break
}
}
for _, point := range series.Histograms {
if point.T == timeMilliseconds(iq.evalTime) {
vec = append(vec, Sample{Metric: series.Metric, T: point.T, H: point.H})
vec = append(vec, promql.Sample{Metric: series.Metric, T: point.T, H: point.H})
break
}
}
}
if _, ok := res.Value.(Scalar); ok {
err = cmd.compareResult(Scalar{V: vec[0].F})
if _, ok := res.Value.(promql.Scalar); ok {
err = cmd.compareResult(promql.Scalar{V: vec[0].F})
} else {
err = cmd.compareResult(vec)
}
@@ -864,7 +888,7 @@ func (t *test) execInstantEval(cmd *evalCmd, engine QueryEngine) error {
return nil
}
func assertMatrixSorted(m Matrix) error {
func assertMatrixSorted(m promql.Matrix) error {
if len(m) <= 1 {
return nil
}
@@ -894,29 +918,6 @@ func (t *test) clear() {
t.context, t.cancelCtx = context.WithCancel(context.Background())
}
// almostEqual returns true if a and b differ by less than their sum
// multiplied by epsilon.
func almostEqual(a, b, epsilon float64) bool {
// NaN has no equality but for testing we still want to know whether both values
// are NaN.
if math.IsNaN(a) && math.IsNaN(b) {
return true
}
// Cf. http://floating-point-gui.de/errors/comparison/
if a == b {
return true
}
absSum := math.Abs(a) + math.Abs(b)
diff := math.Abs(a - b)
if a == 0 || b == 0 || absSum < minNormal {
return diff < epsilon*minNormal
}
return diff/math.Min(absSum, math.MaxFloat64) < epsilon
}
func parseNumber(s string) (float64, error) {
n, err := strconv.ParseInt(s, 0, 64)
f := float64(n)
@@ -937,7 +938,7 @@ type LazyLoader struct {
storage storage.Storage
SubqueryInterval time.Duration
queryEngine *Engine
queryEngine *promql.Engine
context context.Context
cancelCtx context.CancelFunc
@@ -1004,7 +1005,7 @@ func (ll *LazyLoader) clear() error {
return err
}
opts := EngineOpts{
opts := promql.EngineOpts{
Logger: nil,
Reg: nil,
MaxSamples: 10000,
@@ -1014,7 +1015,7 @@ func (ll *LazyLoader) clear() error {
EnableNegativeOffset: ll.opts.EnableNegativeOffset,
}
ll.queryEngine = NewEngine(opts)
ll.queryEngine = promql.NewEngine(opts)
ll.context, ll.cancelCtx = context.WithCancel(context.Background())
return nil
}
@@ -1048,7 +1049,7 @@ func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) {
}
// QueryEngine returns the LazyLoader's query engine.
func (ll *LazyLoader) QueryEngine() *Engine {
func (ll *LazyLoader) QueryEngine() *promql.Engine {
return ll.queryEngine
}
@@ -1074,3 +1075,17 @@ func (ll *LazyLoader) Close() error {
ll.cancelCtx()
return ll.storage.Close()
}
func makeInt64Pointer(val int64) *int64 {
valp := new(int64)
*valp = val
return valp
}
func timeMilliseconds(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
func durationMilliseconds(d time.Duration) int64 {
return int64(d / (time.Millisecond / time.Nanosecond))
}

View file

@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package promql
package promqltest
import (
"math"
@@ -21,14 +21,15 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/tsdb/chunkenc"
)
func TestLazyLoader_WithSamplesTill(t *testing.T) {
type testCase struct {
ts time.Time
series []Series // Each series is checked separately. Need not mention all series here.
checkOnlyError bool // If this is true, series is not checked.
series []promql.Series // Each series is checked separately. Need not mention all series here.
checkOnlyError bool // If this is true, series is not checked.
}
cases := []struct {
@@ -44,33 +45,33 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
testCases: []testCase{
{
ts: time.Unix(40, 0),
series: []Series{
series: []promql.Series{
{
Metric: labels.FromStrings("__name__", "metric1"),
Floats: []FPoint{
{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5},
Floats: []promql.FPoint{
{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5},
},
},
},
},
{
ts: time.Unix(10, 0),
series: []Series{
series: []promql.Series{
{
Metric: labels.FromStrings("__name__", "metric1"),
Floats: []FPoint{
{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5},
Floats: []promql.FPoint{
{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5},
},
},
},
},
{
ts: time.Unix(60, 0),
series: []Series{
series: []promql.Series{
{
Metric: labels.FromStrings("__name__", "metric1"),
Floats: []FPoint{
{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7},
Floats: []promql.FPoint{
{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5}, {T: 50000, F: 6}, {T: 60000, F: 7},
},
},
},
@@ -86,17 +87,17 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
testCases: []testCase{
{ // Adds all samples of metric1.
ts: time.Unix(70, 0),
series: []Series{
series: []promql.Series{
{
Metric: labels.FromStrings("__name__", "metric1"),
Floats: []FPoint{
{0, 1}, {10000, 1}, {20000, 1}, {30000, 1}, {40000, 1}, {50000, 1},
Floats: []promql.FPoint{
{T: 0, F: 1}, {T: 10000, F: 1}, {T: 20000, F: 1}, {T: 30000, F: 1}, {T: 40000, F: 1}, {T: 50000, F: 1},
},
},
{
Metric: labels.FromStrings("__name__", "metric2"),
Floats: []FPoint{
{0, 1}, {10000, 2}, {20000, 3}, {30000, 4}, {40000, 5}, {50000, 6}, {60000, 7}, {70000, 8},
Floats: []promql.FPoint{
{T: 0, F: 1}, {T: 10000, F: 2}, {T: 20000, F: 3}, {T: 30000, F: 4}, {T: 40000, F: 5}, {T: 50000, F: 6}, {T: 60000, F: 7}, {T: 70000, F: 8},
},
},
},
@@ -140,13 +141,13 @@ func TestLazyLoader_WithSamplesTill(t *testing.T) {
require.False(t, ss.Next(), "Expecting only 1 series")
// Convert `storage.Series` to `promql.Series`.
got := Series{
got := promql.Series{
Metric: storageSeries.Labels(),
}
it := storageSeries.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
t, v := it.At()
got.Floats = append(got.Floats, FPoint{T: t, F: v})
got.Floats = append(got.Floats, promql.FPoint{T: t, F: v})
}
require.NoError(t, it.Err())
@@ -450,7 +451,7 @@ eval range from 0 to 5m step 5m testmetric
for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
err := runTest(t, testCase.input, newTestEngine())
err := runTest(t, testCase.input, NewTestEngine(false, 0, DefaultMaxSamplesPerQuery))
if testCase.expectedError == "" {
require.NoError(t, err)
@@ -463,42 +464,42 @@ eval range from 0 to 5m step 5m testmetric
func TestAssertMatrixSorted(t *testing.T) {
testCases := map[string]struct {
matrix Matrix
matrix promql.Matrix
expectedError string
}{
"empty matrix": {
matrix: Matrix{},
matrix: promql.Matrix{},
},
"matrix with one series": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
matrix: promql.Matrix{
promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
},
},
"matrix with two series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
matrix: promql.Matrix{
promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
},
},
"matrix with two series, series in reverse order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_1")},
matrix: promql.Matrix{
promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 1 with labels {the_label="value_1"} sorts before series at index 0 with labels {the_label="value_2"}`,
},
"matrix with three series, series in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
matrix: promql.Matrix{
promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
promql.Series{Metric: labels.FromStrings("the_label", "value_3")},
},
},
"matrix with three series, series not in sorted order": {
matrix: Matrix{
Series{Metric: labels.FromStrings("the_label", "value_1")},
Series{Metric: labels.FromStrings("the_label", "value_3")},
Series{Metric: labels.FromStrings("the_label", "value_2")},
matrix: promql.Matrix{
promql.Series{Metric: labels.FromStrings("the_label", "value_1")},
promql.Series{Metric: labels.FromStrings("the_label", "value_3")},
promql.Series{Metric: labels.FromStrings("the_label", "value_2")},
},
expectedError: `matrix results should always be sorted by labels, but matrix is not sorted: series at index 2 with labels {the_label="value_2"} sorts before series at index 1 with labels {the_label="value_3"}`,
},

View file

@@ -764,6 +764,14 @@ eval instant at 1m avg_over_time(metric10[1m])
eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m])
{} 0
# Test that very big intermediate values do not cause loss of detail.
clear
load 10s
metric 1 1e100 1 -1e100
eval instant at 1m sum_over_time(metric[1m])
{} 2
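Spelled out: 1 is far below the precision (ulp) of 1e100, so in naive float64 arithmetic each 1 is absorbed and the running sum ends at 0; the compensated (Kahan/Neumaier) summation keeps the absorbed values in a carry term:

    naive:       1 + 1e100 = 1e100;  1e100 + 1 = 1e100;  1e100 - 1e100 = 0
    compensated: the carry accumulates the two absorbed 1s, so the result is 2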
# Tests for stddev_over_time and stdvar_over_time.
clear
load 10s

View file

@@ -0,0 +1,716 @@
# Minimal valid case: an empty histogram.
load 5m
empty_histogram {{}}
eval instant at 5m empty_histogram
{__name__="empty_histogram"} {{}}
eval instant at 5m histogram_count(empty_histogram)
{} 0
eval instant at 5m histogram_sum(empty_histogram)
{} 0
eval instant at 5m histogram_avg(empty_histogram)
{} NaN
eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram)
{} NaN
eval instant at 5m histogram_fraction(0, 8, empty_histogram)
{} NaN
# buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4).
load 5m
single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}
# histogram_count extracts the count property from the histogram.
eval instant at 5m histogram_count(single_histogram)
{} 4
# histogram_sum extracts the sum property from the histogram.
eval instant at 5m histogram_sum(single_histogram)
{} 5
# histogram_avg calculates the average from sum and count properties.
eval instant at 5m histogram_avg(single_histogram)
{} 1.25
# We expect half of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, single_histogram)
{} 0.5
# We expect all values to fall in the range 0 < x <= 8.
eval instant at 5m histogram_fraction(0, 8, single_histogram)
{} 1
# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within the range 1 < x <= 2.
eval instant at 5m histogram_quantile(0.5, single_histogram)
{} 1.5
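The linear estimation behind that median, spelled out:

    rank                      = 0.5 * count = 0.5 * 4 = 2
    observations below (1, 2] = 1, observations inside = 2
    fraction into the bucket  = (2 - 1) / 2 = 0.5
    estimate                  = 1 + 0.5 * (2 - 1) = 1.5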
# Repeat the same histogram 10 times.
load 5m
multi_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}}x10
eval instant at 5m histogram_count(multi_histogram)
{} 4
eval instant at 5m histogram_sum(multi_histogram)
{} 5
eval instant at 5m histogram_avg(multi_histogram)
{} 1.25
eval instant at 5m histogram_fraction(1, 2, multi_histogram)
{} 0.5
eval instant at 5m histogram_quantile(0.5, multi_histogram)
{} 1.5
# Each entry should look the same as the first.
eval instant at 50m histogram_count(multi_histogram)
{} 4
eval instant at 50m histogram_sum(multi_histogram)
{} 5
eval instant at 50m histogram_avg(multi_histogram)
{} 1.25
eval instant at 50m histogram_fraction(1, 2, multi_histogram)
{} 0.5
eval instant at 50m histogram_quantile(0.5, multi_histogram)
{} 1.5
# Accumulate the histogram addition for 10 iterations. offset is a bucket position: offset:0 is always the bucket
# with an upper limit of 1, and offset:1 is the bucket immediately to its right. Negative offsets represent bucket
# positions for upper limits <1 (tending toward zero), where offset:-1 is the bucket to the left of offset:0.
load 5m
incr_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{sum:2 count:1 buckets:[1] offset:1}}x10
eval instant at 5m histogram_count(incr_histogram)
{} 5
eval instant at 5m histogram_sum(incr_histogram)
{} 6
eval instant at 5m histogram_avg(incr_histogram)
{} 1.2
# We expect 3/5ths of the values to fall in the range 1 < x <= 2.
eval instant at 5m histogram_fraction(1, 2, incr_histogram)
{} 0.6
eval instant at 5m histogram_quantile(0.5, incr_histogram)
{} 1.5
eval instant at 50m incr_histogram
{__name__="incr_histogram"} {{count:14 sum:24 buckets:[1 12 1]}}
eval instant at 50m histogram_count(incr_histogram)
{} 14
eval instant at 50m histogram_sum(incr_histogram)
{} 24
eval instant at 50m histogram_avg(incr_histogram)
{} 1.7142857142857142
# We expect 12/14ths of the values to fall in the range 1 < x <= 2.
eval instant at 50m histogram_fraction(1, 2, incr_histogram)
{} 0.8571428571428571
eval instant at 50m histogram_quantile(0.5, incr_histogram)
{} 1.5
# Per-second average rate of increase should be 1/(5*60) for count and buckets, and 2/(5*60) for sum.
eval instant at 50m rate(incr_histogram[5m])
{} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}}
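That is, the per-interval increment divided by the 300s interval:

    count and bucket: 1 / (5 * 60) = 0.00333...
    sum:              2 / (5 * 60) = 0.00666...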
# Calculate the 50th percentile of observations over the last 10m.
eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m]))
{} 1.5
# Schema represents the histogram resolution; different schemas have compatible bucket boundaries, e.g.:
# 0: 1 2 4 8 16 32 64 (higher resolution)
# -1: 1 4 16 64 (lower resolution)
#
# Histograms can be merged as long as the histogram to the right has the same or higher resolution.
load 5m
low_res_histogram {{schema:-1 sum:4 count:1 buckets:[1] offset:1}}+{{schema:0 sum:4 count:4 buckets:[2 2] offset:1}}x1
eval instant at 5m low_res_histogram
{__name__="low_res_histogram"} {{schema:-1 count:5 sum:8 offset:1 buckets:[5]}}
eval instant at 5m histogram_count(low_res_histogram)
{} 5
eval instant at 5m histogram_sum(low_res_histogram)
{} 8
eval instant at 5m histogram_avg(low_res_histogram)
{} 1.6
# We expect all values to fall into the lower-resolution bucket with the range 1 < x <= 4.
eval instant at 5m histogram_fraction(1, 4, low_res_histogram)
{} 1
# z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket covers the range
# -0.5 < x <= +0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket.
load 5m
single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}}
eval instant at 5m histogram_count(single_zero_histogram)
{} 1
eval instant at 5m histogram_sum(single_zero_histogram)
{} 0.25
eval instant at 5m histogram_avg(single_zero_histogram)
{} 0.25
# When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be symmetric
# around zero, i.e. there are an equal number of positive and negative observations. Therefore the
# entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5.
eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram)
{} 1
# Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5.
eval instant at 5m histogram_quantile(0.5, single_zero_histogram)
{} 0
# Let's turn single_histogram upside-down.
load 5m
negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}}
eval instant at 5m histogram_count(negative_histogram)
{} 4
eval instant at 5m histogram_sum(negative_histogram)
{} -5
eval instant at 5m histogram_avg(negative_histogram)
{} -1.25
# We expect half of the values to fall in the range -2 < x <= -1.
eval instant at 5m histogram_fraction(-2, -1, negative_histogram)
{} 0.5
eval instant at 5m histogram_quantile(0.5, negative_histogram)
{} -1.5
# Two histogram samples.
load 5m
two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}
# We expect to see the newest sample.
eval instant at 10m histogram_count(two_samples_histogram)
{} 4
eval instant at 10m histogram_sum(two_samples_histogram)
{} -4
eval instant at 10m histogram_avg(two_samples_histogram)
{} -1
eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram)
{} 0.5
eval instant at 10m histogram_quantile(0.5, two_samples_histogram)
{} -1.5
# Add two histograms with negated data.
load 5m
balanced_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}}+{{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}}x1
eval instant at 5m histogram_count(balanced_histogram)
{} 8
eval instant at 5m histogram_sum(balanced_histogram)
{} 0
eval instant at 5m histogram_avg(balanced_histogram)
{} 0
eval instant at 5m histogram_fraction(0, 4, balanced_histogram)
{} 0.5
# If the quantile happens to be located in a span of empty buckets, the value actually returned is the lower bound of
# the first populated bucket after that span.
eval instant at 5m histogram_quantile(0.5, balanced_histogram)
{} 0.5
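Concretely:

    rank = 0.5 * count = 0.5 * 8 = 4, which the four negative observations exhaust exactly,

so the quantile lands in the empty gap before the positive buckets; the first populated bucket after that gap is (0.5, 1], and its lower bound 0.5 is returned.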
# Add histograms to test a sum(last_over_time) regression.
load 5m
incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10
incr_sum_histogram{number="2"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:2 count:1 buckets:[1]}}x10
eval instant at 50m histogram_sum(sum(incr_sum_histogram))
{} 30
eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m])))
{} 30
# Apply rate function to histogram.
load 15s
histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100
eval instant at 5m rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}
eval range from 5m to 5m30s step 30s rate(histogram_rate[45s])
{} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1
# Apply count and sum function to histogram.
load 10m
histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_count(histogram_count_sum_2)
{} 24
eval instant at 10m histogram_sum(histogram_count_sum_2)
{} 100
# Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res).
load 10m
histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1)
{} 1.0787993180043811
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1)
{} 1.163807968526718
# Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res).
load 10m
histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2)
{} 0.0048960313898237465
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2)
{} 2.3971123370139447e-05
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}.
load 10m
histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3)
{} 42.947236400258
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3)
{} 1844.4651144196398
# Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}.
load 10m
histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4)
{} 27556.344499842
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4)
{} 759352122.1939945
# Apply stddev and stdvar function to histogram with {-10x10}.
load 10m
histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5)
{} 1.3137084989848
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5)
{} 1.725830020304794
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}.
load 10m
histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6)
{} NaN
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6)
{} NaN
# Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}.
load 10m
histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1
eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7)
{} NaN
eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7)
{} NaN
# Apply quantile function to a histogram with all-positive buckets and a zero bucket.
load 10m
histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_1)
{} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_1)
{} 16
eval instant at 10m histogram_quantile(0.99, histogram_quantile_1)
{} 15.759999999999998
eval instant at 10m histogram_quantile(0.9, histogram_quantile_1)
{} 13.600000000000001
eval instant at 10m histogram_quantile(0.6, histogram_quantile_1)
{} 4.799999999999997
eval instant at 10m histogram_quantile(0.5, histogram_quantile_1)
{} 1.6666666666666665
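For example, the 0.5 quantile just above works out as follows (with no offset, the first bucket is (0.5, 1]):

    rank                    = 0.5 * 12 = 6
    cumulative below (1, 2] = z_bucket 2 + bucket (0.5, 1] 2 = 4
    fraction into (1, 2]    = (6 - 4) / 3 = 2/3
    estimate                = 1 + (2/3) * (2 - 1) = 1.666...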
eval instant at 10m histogram_quantile(0.1, histogram_quantile_1)
{} 0.0006000000000000001
eval instant at 10m histogram_quantile(0, histogram_quantile_1)
{} 0
eval instant at 10m histogram_quantile(-1, histogram_quantile_1)
{} -Inf
# Apply quantile function to a histogram with all-negative buckets and a zero bucket.
load 10m
histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_2)
{} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_2)
{} 0
eval instant at 10m histogram_quantile(0.99, histogram_quantile_2)
{} -6.000000000000048e-05
eval instant at 10m histogram_quantile(0.9, histogram_quantile_2)
{} -0.0005999999999999996
eval instant at 10m histogram_quantile(0.5, histogram_quantile_2)
{} -1.6666666666666667
eval instant at 10m histogram_quantile(0.1, histogram_quantile_2)
{} -13.6
eval instant at 10m histogram_quantile(0, histogram_quantile_2)
{} -16
eval instant at 10m histogram_quantile(-1, histogram_quantile_2)
{} -Inf
# Apply quantile function to a histogram with both positive and negative buckets and a zero bucket.
load 10m
histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_quantile(1.001, histogram_quantile_3)
{} Inf
eval instant at 10m histogram_quantile(1, histogram_quantile_3)
{} 16
eval instant at 10m histogram_quantile(0.99, histogram_quantile_3)
{} 15.519999999999996
eval instant at 10m histogram_quantile(0.9, histogram_quantile_3)
{} 11.200000000000003
eval instant at 10m histogram_quantile(0.7, histogram_quantile_3)
{} 1.2666666666666657
eval instant at 10m histogram_quantile(0.55, histogram_quantile_3)
{} 0.0006000000000000005
eval instant at 10m histogram_quantile(0.5, histogram_quantile_3)
{} 0
eval instant at 10m histogram_quantile(0.45, histogram_quantile_3)
{} -0.0005999999999999996
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3)
{} -1.266666666666667
eval instant at 10m histogram_quantile(0.1, histogram_quantile_3)
{} -11.2
eval instant at 10m histogram_quantile(0.01, histogram_quantile_3)
{} -15.52
eval instant at 10m histogram_quantile(0, histogram_quantile_3)
{} -16
eval instant at 10m histogram_quantile(-1, histogram_quantile_3)
{} -Inf
# Apply fraction function to empty histogram.
load 10m
histogram_fraction_1 {{}}x1
eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1)
{} NaN
# Apply fraction function to histogram with positive and zero buckets.
load 10m
histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_2)
{} 1
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2)
{} 0.8333333333333334
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2)
{} 0.25
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2)
{} 0.125
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2)
{} 0.3333333333333333
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2)
{} 0.2916666666666667
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_2)
{} 0
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_2)
{} NaN
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_2)
{} NaN
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_2)
{} NaN
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_2)
{} 1
# Apply fraction function to histogram with negative and zero buckets.
load 10m
histogram_fraction_3 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_3)
{} 1
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_3)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_3)
{} 0.8333333333333334
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3)
{} 0.25
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3)
{} 0.125
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3)
{} 0.3333333333333333
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3)
{} 0.2916666666666667
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_3)
{} 0
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_3)
{} NaN
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_3)
{} NaN
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3)
{} NaN
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3)
{} 1
# Apply fraction function to a histogram with positive, negative, and zero buckets.
load 10m
histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1
eval instant at 10m histogram_fraction(0, +Inf, histogram_fraction_4)
{} 0.5
eval instant at 10m histogram_fraction(-Inf, 0, histogram_fraction_4)
{} 0.5
eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4)
{} 0.4166666666666667
eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_4)
{} 0.4166666666666667
eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4)
{} 0.125
eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4)
{} 0.0625
eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4)
{} 0.14583333333333334
eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4)
{} 0.125
eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4)
{} 0.0625
eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4)
{} 0.16666666666666666
eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4)
{} 0.14583333333333334
eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4)
{} 0.08333333333333333
eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(0, 0, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(0.000001, 0.000001, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(42, 42, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(-3.1, -3.1, histogram_fraction_4)
{} 0
eval instant at 10m histogram_fraction(3.1415, NaN, histogram_fraction_4)
{} NaN
eval instant at 10m histogram_fraction(NaN, 42, histogram_fraction_4)
{} NaN
eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_4)
{} NaN
eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_4)
{} 1

Some files were not shown because too many files have changed in this diff.