Merge branch 'main' into mantine-ui

Signed-off-by: Julius Volz <julius.volz@gmail.com>
Author: Julius Volz
Date: 2024-07-15 14:11:48 +02:00
Commit: edf31da311
303 changed files with 23285 additions and 8197 deletions


@ -11,6 +11,7 @@ updates:
go.opentelemetry.io:
patterns:
- "go.opentelemetry.io/*"
open-pull-requests-limit: 20
- package-ecosystem: "gomod"
directory: "/documentation/examples/remote_storage"
schedule:
@ -19,6 +20,7 @@ updates:
directory: "/web/ui"
schedule:
interval: "monthly"
open-pull-requests-limit: 20
- package-ecosystem: "github-actions"
directory: "/"
schedule:


@ -12,14 +12,14 @@ jobs:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
with:
input: 'prompb'
- uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
- uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4
with:
input: 'prompb'
against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb'


@ -12,14 +12,14 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'prometheus'
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: bufbuild/buf-setup-action@dde0b9351db90fbf78e345f41a57de8514bf1091 # v1.32.2
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
- uses: bufbuild/buf-lint-action@044d13acb1f155179c606aaa2e53aea304d22058 # v1.1.0
- uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
with:
input: 'prompb'
- uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
- uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4
with:
input: 'prompb'
against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'


@ -13,7 +13,7 @@ jobs:
# should also be updated.
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
@ -27,7 +27,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
- run: go test --tags=dedupelabels ./...
@ -43,7 +43,7 @@ jobs:
# The go version in this image should be N-1 wrt test_go.
image: quay.io/prometheus/golang-builder:1.21-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- run: make build
# Don't run NPM build; don't run race-detector.
- run: make test GO_ONLY=1 test-flags=""
@ -57,7 +57,7 @@ jobs:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/setup_environment
with:
@ -74,8 +74,8 @@ jobs:
name: Go tests on Windows
runs-on: windows-latest
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
go-version: 1.22.x
- run: |
@ -91,7 +91,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder:1.22-base
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- run: go install ./cmd/promtool/.
- run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
- run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@ -114,7 +114,7 @@ jobs:
matrix:
thread: [ 0, 1, 2 ]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
@ -137,32 +137,44 @@ jobs:
# Whenever the Go version is updated here, .promu.yml
# should also be updated.
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/build
with:
parallelism: 12
thread: ${{ matrix.thread }}
check_generated_parser:
name: Check generated parser
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
cache: false
go-version: 1.22.x
- name: Run goyacc and check for diff
run: make install-goyacc check-generated-parser
golangci:
name: golangci-lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Install Go
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1
with:
cache: false
go-version: 1.22.x
- name: Install snmp_exporter/generator dependencies
run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
if: github.repository == 'prometheus/snmp_exporter'
- name: Lint
uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
with:
args: --verbose
# Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
version: v1.56.2
version: v1.59.1
fuzzing:
uses: ./.github/workflows/fuzzing.yml
if: github.event_name == 'pull_request'
@ -175,7 +187,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_main
with:
@ -189,7 +201,7 @@ jobs:
needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- uses: ./.github/promci/actions/publish_release
with:
@ -204,7 +216,7 @@ jobs:
needs: [test_ui, codeql]
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
- name: Install nodejs
uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2


@ -24,15 +24,15 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Initialize CodeQL
uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
uses: github/codeql-action/init@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
uses: github/codeql-action/autobuild@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
uses: github/codeql-action/analyze@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # v3.25.8


@ -4,6 +4,7 @@ on:
push:
paths:
- "README.md"
- "README-containers.md"
- ".github/workflows/container_description.yml"
branches: [ main, master ]
@ -17,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set docker hub repo name
run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
- name: Push README to Dockerhub
@ -29,7 +30,9 @@ jobs:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: dockerhub
short_description: ${{ env.DOCKER_REPO_NAME }}
readme_file: 'README.md'
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''
PushQuayIoReadme:
runs-on: ubuntu-latest
@ -37,7 +40,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- name: Set quay.io org name
run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
- name: Set quay.io repo name
@ -49,4 +52,6 @@ jobs:
with:
destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
provider: quay
readme_file: 'README.md'
# Empty string results in README-containers.md being pushed if it
# exists. Otherwise, README.md is pushed.
readme_file: ''


@ -21,7 +21,7 @@ jobs:
fuzz-seconds: 600
dry-run: false
- name: Upload Crash
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts


@ -13,7 +13,7 @@ jobs:
container:
image: quay.io/prometheus/golang-builder
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- run: ./scripts/sync_repo_files.sh
env:
GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}


@ -21,12 +21,12 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # tag=v4.1.2
uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # tag=v2.3.1
uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # tag=v2.3.3
with:
results_file: results.sarif
results_format: sarif
@ -37,7 +37,7 @@ jobs:
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # tag=v4.3.1
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
with:
name: SARIF file
path: results.sarif
@ -45,6 +45,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12
uses: github/codeql-action/upload-sarif@2e230e8fe0ad3a14a340ad0815ddb96d599d2aff # tag=v3.25.8
with:
sarif_file: results.sarif

.gitpod.Dockerfile vendored

@ -1,15 +1,33 @@
FROM gitpod/workspace-full
# Set Node.js version as an environment variable.
ENV CUSTOM_NODE_VERSION=16
ENV CUSTOM_GO_VERSION=1.19
ENV GOPATH=$HOME/go-packages
ENV GOROOT=$HOME/go
ENV PATH=$GOROOT/bin:$GOPATH/bin:$PATH
# Install and use the specified Node.js version via nvm.
RUN bash -c ". .nvm/nvm.sh && nvm install ${CUSTOM_NODE_VERSION} && nvm use ${CUSTOM_NODE_VERSION} && nvm alias default ${CUSTOM_NODE_VERSION}"
# Ensure nvm uses the default Node.js version in all new shells.
RUN echo "nvm use default &>/dev/null" >> ~/.bashrc.d/51-nvm-fix
RUN curl -fsSL https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz | tar xzs \
&& printf '%s\n' 'export GOPATH=/workspace/go' \
'export PATH=$GOPATH/bin:$PATH' > $HOME/.bashrc.d/300-go
# Remove any existing Go installation in $HOME path.
RUN rm -rf $HOME/go $HOME/go-packages
# Export go environment variables.
RUN echo "export GOPATH=/workspace/go" >> ~/.bashrc.d/300-go && \
echo "export GOBIN=\$GOPATH/bin" >> ~/.bashrc.d/300-go && \
echo "export GOROOT=${HOME}/go" >> ~/.bashrc.d/300-go && \
echo "export PATH=\$GOROOT/bin:\$GOBIN:\$PATH" >> ~/.bashrc
# Reload the environment variables to ensure go environment variables are
# available in subsequent commands.
RUN bash -c "source ~/.bashrc && source ~/.bashrc.d/300-go"
# Fetch the Go version dynamically from the Prometheus go.mod file and Install Go in $HOME path.
RUN export CUSTOM_GO_VERSION=$(curl -sSL "https://raw.githubusercontent.com/prometheus/prometheus/main/go.mod" | awk '/^go/{print $2".0"}') && \
curl -fsSL "https://dl.google.com/go/go${CUSTOM_GO_VERSION}.linux-amd64.tar.gz" | \
tar -xz -C $HOME
# Fetch the goyacc parser version dynamically from the Prometheus Makefile
# and install it globally in $GOBIN path.
RUN GOYACC_VERSION=$(curl -fsSL "https://raw.githubusercontent.com/prometheus/prometheus/main/Makefile" | awk -F'=' '/GOYACC_VERSION \?=/{gsub(/ /, "", $2); print $2}') && \
go install "golang.org/x/tools/cmd/goyacc@${GOYACC_VERSION}"


@ -21,6 +21,7 @@ linters:
- goimports
- misspell
- nolintlint
- perfsprint
- predeclared
- revive
- testifylint
@ -28,6 +29,7 @@ linters:
- unused
- usestdlibvars
- whitespace
- loggercheck
issues:
max-same-issues: 0
@ -44,7 +46,9 @@ issues:
- linters:
- godot
source: "^// ==="
- linters:
- perfsprint
text: "fmt.Sprintf can be replaced with string concatenation"
linters-settings:
depguard:
rules:
@ -85,6 +89,9 @@ linters-settings:
local-prefixes: github.com/prometheus/prometheus
gofumpt:
extra-rules: true
perfsprint:
# Optimizes `fmt.Errorf`.
errorf: false
revive:
# By default, revive will enable only the linting rules that are named in the configuration file.
# So, it's needed to explicitly set in configuration all required rules.


@ -2,7 +2,56 @@
## unreleased
## 2.53.0 / 2024-06-16
This release changes the default for GOGC, the Go runtime control for the trade-off between excess memory use and CPU usage. We have found that adjusting the upstream Go default from 100 to 75 greatly reduces Prometheus's memory use while adding only minimal CPU usage.
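As a minimal sketch of the corresponding configuration (the file layout is illustrative; only the `runtime` block and `gogc` field come from the configuration changes further down in this commit), the new default can be overridden like this:

```yaml
# prometheus.yml (sketch): override the new GOGC default of 75.
runtime:
  gogc: 100   # restore the previous upstream Go default
```

If the `runtime` block is omitted, the value of the `GOGC` environment variable is used instead, falling back to the new default of 75.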
* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980 #14048
* [CHANGE] Runtime: Change GOGC threshold from 100 to 75 #14176 #14285
* [FEATURE] Rules: Add new option `query_offset` for each rule group via rule group configuration file and `rule_query_offset` as part of the global configuration to have more resilience for remote write delays. #14061 #14216 #14273
* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` metric to measure the time it takes to restore a rule group. #13974
* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
* [BUGFIX] OTLP: Don't generate target_info unless there are metrics and at least one identifying label is defined. #13991
* [BUGFIX] Scrape: Do not try to ingest native histograms when the native histograms feature is turned off. This happened when protobuf scraping was enabled, for example by the created timestamp feature. #13987
* [BUGFIX] Scaleway SD: Use the instance's public IP if no private IP is available as the `__address__` meta label. #13941
* [BUGFIX] Query logger: Do not leak file descriptors on error. #13948
* [BUGFIX] TSDB: Let queries with heavy regex matches be cancelled and not use up the CPU. #14096 #14103 #14118 #14199
* [BUGFIX] API: Do not warn if result count is equal to the limit, only when exceeding the limit for the series, label-names and label-values APIs. #14116
* [BUGFIX] TSDB: Fix head stats and hooks when replaying a corrupted snapshot. #14079
## 2.52.1 / 2024-05-29
* [BUGFIX] Linode SD: Fix partial fetch when discovery would return more than 500 elements. #14141
## 2.52.0 / 2024-05-07
* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
* [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
* [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935
* [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099
* [FEATURE] Alerting: Support native histogram templating. #13731
* [FEATURE] Linode SD: Support IPv6 range discovery and region filtering. #13774
* [ENHANCEMENT] PromQL: Performance improvements for queries with regex matchers. #13461
* [ENHANCEMENT] PromQL: Performance improvements when using aggregation operators. #13744
* [ENHANCEMENT] PromQL: Validate label_join destination label. #13803
* [ENHANCEMENT] Scrape: Increment `prometheus_target_scrapes_sample_duplicate_timestamp_total` metric on duplicated series during one scrape. #12933
* [ENHANCEMENT] TSDB: Many improvements in performance. #13742 #13673 #13782
* [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
* [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
* [ENHANCEMENT] Observability: All log lines for dropping series use the "num_dropped" key consistently. #13823
* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
* [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
* [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
* [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
* [BUGFIX] PromQL: Fix possible duplicated label name and values in a metric result for specific queries. #13845
* [BUGFIX] Scrape: Fix setting native histogram schema factor during scrape. #13846
* [BUGFIX] TSDB: Fix counting of histogram samples when creating WAL checkpoint stats. #13776
* [BUGFIX] TSDB: Fix cases of compacting empty heads. #13755
* [BUGFIX] TSDB: Count float histograms in WAL checkpoint. #13844
* [BUGFIX] Remote Read: Fix memory leak due to broken requests. #13777
* [BUGFIX] API: Stop building response for `/api/v1/series/` when the API request was cancelled. #13766
* [BUGFIX] promtool: Fix panic on `promtool tsdb analyze --extended` when no native histograms are present. #13976
## 2.51.2 / 2024-04-09


@ -42,7 +42,12 @@ go build ./cmd/prometheus/
make test # Make sure all the tests pass before you commit and push :)
```
We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action.
To run a collection of Go linters through [`golangci-lint`](https://github.com/golangci/golangci-lint), do:
```bash
make lint
```
If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. See [this section of the golangci-lint documentation](https://golangci-lint.run/usage/false-positives/#nolint-directive) for more information.
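For illustration only (this snippet is not part of the repository), suppressing a single finding from one of the enabled linters, such as `perfsprint`, looks like this:

```go
// Package nolintexample is a hypothetical snippet showing the //nolint directive.
package nolintexample

import "fmt"

// FormatIndex deliberately keeps fmt.Sprintf; the perfsprint suggestion to use
// strconv.Itoa is suppressed on the offending line only, with a justification.
func FormatIndex(i int) string {
	return fmt.Sprintf("%d", i) //nolint:perfsprint // keep Sprintf for symmetry with other format strings
}
```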
All our issues are regularly tagged so that you can also filter down the issues involving the components you want to work on. For our labeling policy refer [the wiki page](https://github.com/prometheus/prometheus/wiki/Label-Names-and-Descriptions).


@ -16,7 +16,7 @@ Maintainers for specific parts of the codebase:
* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
* `storage`
* `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
* `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie), Nicolás Pazos (<npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank (<alex.greenbank@grafana.com> / @alexgreenbank)
* `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
* `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)


@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout
GOLANGCI_LINT_OPTS ?= --timeout 4m
GOYACC_VERSION ?= v0.6.0
include Makefile.common
@ -86,24 +87,42 @@ assets-tarball: assets
@echo '>> packaging assets'
scripts/package_assets.sh
# We only want to generate the parser when there's changes to the grammar.
.PHONY: parser
parser:
@echo ">> running goyacc to generate the .go file."
ifeq (, $(shell command -v goyacc 2> /dev/null))
@echo "goyacc not installed so skipping"
@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
@echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\""
else
goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
$(MAKE) promql/parser/generated_parser.y.go
endif
promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
@echo ">> running goyacc to generate the .go file."
@$(FIRST_GOPATH)/bin/goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
.PHONY: clean-parser
clean-parser:
@echo ">> cleaning generated parser"
@rm -f promql/parser/generated_parser.y.go
.PHONY: check-generated-parser
check-generated-parser: clean-parser promql/parser/generated_parser.y.go
@echo ">> checking generated parser"
@git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false)
.PHONY: install-goyacc
install-goyacc:
@echo ">> installing goyacc $(GOYACC_VERSION)"
@go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)
.PHONY: test
# If we only want to only test go code we have to change the test target
# which is called by all.
ifeq ($(GO_ONLY),1)
test: common-test check-go-mod-version
else
test: common-test ui-build-module ui-test ui-lint check-go-mod-version
test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
endif
.PHONY: npm_licenses


@ -55,13 +55,13 @@ ifneq ($(shell command -v gotestsum 2> /dev/null),)
endif
endif
PROMU_VERSION ?= 0.15.0
PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.56.2
GOLANGCI_LINT_VERSION ?= v1.59.1
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))


@ -1,8 +1,8 @@
<h1 align="center" style="border-bottom: none">
<a href="//prometheus.io" target="_blank"><img alt="Prometheus" src="/documentation/images/prometheus-logo.svg"></a><br>Prometheus
<a href="https://prometheus.io" target="_blank"><img alt="Prometheus" src="/documentation/images/prometheus-logo.svg"></a><br>Prometheus
</h1>
<p align="center">Visit <a href="//prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
<p align="center">Visit <a href="https://prometheus.io" target="_blank">prometheus.io</a> for the full documentation,
examples and guides.</p>
<div align="center">


@ -57,7 +57,8 @@ Release cadence of first pre-releases being cut is 6 weeks.
| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis) |
| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham) |
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
@ -149,6 +150,8 @@ Changes for a patch release or release candidate should be merged into the previ
Bump the version in the `VERSION` file and update `CHANGELOG.md`. Do this in a proper PR pointing to the release branch as this gives others the opportunity to chime in on the release in general and on the addition to the changelog in particular. For a release candidate, append something like `-rc.0` to the version (with the corresponding changes to the tag name, the release name etc.).
When updating the `CHANGELOG.md`, look at all PRs included in the release since the last release and verify if they need a changelog entry.
Note that `CHANGELOG.md` should only document changes relevant to users of Prometheus, including external API changes, performance improvements, and new features. Do not document changes of internal interfaces, code refactorings and clean-ups, changes to the build process, etc. People interested in these are asked to refer to the git history.
For release candidates, still update `CHANGELOG.md`, but when you cut the final release later, merge all the changes from the pre-releases into one final update.


@ -1 +1 @@
2.51.2
2.53.0


@ -28,6 +28,8 @@ import (
"os/signal"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"syscall"
@ -42,6 +44,7 @@ import (
"github.com/mwitkow/go-conntrack"
"github.com/oklog/run"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
@ -191,6 +194,9 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
case "extra-scrape-metrics":
c.scrape.ExtraMetrics = true
level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
case "metadata-wal-records":
c.scrape.AppendMetadata = true
level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
case "new-service-discovery-manager":
c.enableNewSDManager = true
level.Info(logger).Log("msg", "Experimental service discovery manager")
@ -217,6 +223,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
case "native-histograms":
c.tsdb.EnableNativeHistograms = true
c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@ -254,6 +261,18 @@ func main() {
newFlagRetentionDuration model.Duration
)
// Unregister the default GoCollector, and reregister with our defaults.
if prometheus.Unregister(collectors.NewGoCollector()) {
prometheus.MustRegister(
collectors.NewGoCollector(
collectors.WithGoCollectorRuntimeMetrics(
collectors.MetricsGC,
collectors.MetricsScheduler,
),
),
)
}
cfg := flagConfig{
notifier: notifier.Options{
Registerer: prometheus.DefaultRegisterer,
@ -309,9 +328,15 @@ func main() {
a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
Default("false").BoolVar(&cfg.web.EnableAdminAPI)
// TODO(bwplotka): Consider allowing those remote receive flags to be changed in config.
// See https://github.com/prometheus/prometheus/issues/14410
a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)
supportedRemoteWriteProtoMsgs := config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2}
a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
@ -420,7 +445,7 @@ func main() {
serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
Default("1m").SetValue(&cfg.resendDelay)
serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently.").
serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
Default("4").Int64Var(&cfg.maxConcurrentEvals)
a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@ -432,6 +457,9 @@ func main() {
serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
Default("10000").IntVar(&cfg.notifier.QueueCapacity)
serverOnlyFlag(a, "alertmanager.drain-notification-queue-on-shutdown", "Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down.").
Default("true").BoolVar(&cfg.notifier.DrainOnShutdown)
// TODO: Remove in Prometheus 3.0.
alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
@ -630,7 +658,7 @@ func main() {
var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
@ -774,6 +802,9 @@ func main() {
ResendDelay: time.Duration(cfg.resendDelay),
MaxConcurrentEvals: cfg.maxConcurrentEvals,
ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
DefaultRuleQueryOffset: func() time.Duration {
return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
},
})
}
@ -1181,7 +1212,7 @@ func main() {
}
if agentMode {
// WAL storage.
opts := cfg.agent.ToAgentOptions()
opts := cfg.agent.ToAgentOptions(cfg.tsdb.OutOfOrderTimeWindow)
cancel := make(chan struct{})
g.Add(
func() error {
@ -1217,6 +1248,7 @@ func main() {
"TruncateFrequency", cfg.agent.TruncateFrequency,
"MinWALTime", cfg.agent.MinWALTime,
"MaxWALTime", cfg.agent.MaxWALTime,
"OutOfOrderTimeWindow", cfg.agent.OutOfOrderTimeWindow,
)
localStorage.Set(db, 0)
@ -1370,6 +1402,17 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
}
oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
if oldGoGC != conf.Runtime.GoGC {
level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
}
// Write the new setting out to the ENV var for runtime API output.
if conf.Runtime.GoGC >= 0 {
os.Setenv("GOGC", strconv.Itoa(conf.Runtime.GoGC))
} else {
os.Setenv("GOGC", "off")
}
noStepSubqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
level.Info(logger).Log(append(l, timings...)...)
@ -1709,9 +1752,13 @@ type agentOptions struct {
TruncateFrequency model.Duration
MinWALTime, MaxWALTime model.Duration
NoLockfile bool
OutOfOrderTimeWindow int64
}
func (opts agentOptions) ToAgentOptions() agent.Options {
func (opts agentOptions) ToAgentOptions(outOfOrderTimeWindow int64) agent.Options {
if outOfOrderTimeWindow < 0 {
outOfOrderTimeWindow = 0
}
return agent.Options{
WALSegmentSize: int(opts.WALSegmentSize),
WALCompression: wlog.ParseCompressionType(opts.WALCompression, opts.WALCompressionType),
@ -1720,6 +1767,7 @@ func (opts agentOptions) ToAgentOptions() agent.Options {
MinWALTime: durationToInt64Millis(time.Duration(opts.MinWALTime)),
MaxWALTime: durationToInt64Millis(time.Duration(opts.MaxWALTime)),
NoLockfile: opts.NoLockfile,
OutOfOrderTimeWindow: outOfOrderTimeWindow,
}
}
@ -1731,3 +1779,39 @@ type discoveryManager interface {
Run() error
SyncCh() <-chan map[string][]*targetgroup.Group
}
// rwProtoMsgFlagParser is a custom parser for config.RemoteWriteProtoMsg enum.
type rwProtoMsgFlagParser struct {
msgs *[]config.RemoteWriteProtoMsg
}
func rwProtoMsgFlagValue(msgs *[]config.RemoteWriteProtoMsg) kingpin.Value {
return &rwProtoMsgFlagParser{msgs: msgs}
}
// IsCumulative is used by kingpin to tell if it's an array or not.
func (p *rwProtoMsgFlagParser) IsCumulative() bool {
return true
}
func (p *rwProtoMsgFlagParser) String() string {
ss := make([]string, 0, len(*p.msgs))
for _, t := range *p.msgs {
ss = append(ss, string(t))
}
return strings.Join(ss, ",")
}
func (p *rwProtoMsgFlagParser) Set(opt string) error {
t := config.RemoteWriteProtoMsg(opt)
if err := t.Validate(); err != nil {
return err
}
for _, prev := range *p.msgs {
if prev == t {
return fmt.Errorf("duplicated %v flag value, got %v already", t, *p.msgs)
}
}
*p.msgs = append(*p.msgs, t)
return nil
}


@ -24,16 +24,19 @@ import (
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/rules"
@ -189,7 +192,7 @@ func TestSendAlerts(t *testing.T) {
for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
require.NotEmpty(t, tc.in, "sender called with 0 alert")
require.Equal(t, tc.exp, alerts)
@ -498,3 +501,65 @@ func TestDocumentation(t *testing.T) {
require.Equal(t, string(expectedContent), generatedContent, "Generated content does not match documentation. Hint: run `make cli-documentation`.")
}
func TestRwProtoMsgFlagParser(t *testing.T) {
defaultOpts := config.RemoteWriteProtoMsgs{
config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
}
for _, tcase := range []struct {
args []string
expected []config.RemoteWriteProtoMsg
expectedErr error
}{
{
args: nil,
expected: defaultOpts,
},
{
args: []string{"--test-proto-msgs", "test"},
expectedErr: errors.New("unknown remote write protobuf message test, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request"),
},
{
args: []string{"--test-proto-msgs", "io.prometheus.write.v2.Request"},
expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2},
},
{
args: []string{
"--test-proto-msgs", "io.prometheus.write.v2.Request",
"--test-proto-msgs", "io.prometheus.write.v2.Request",
},
expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request] already"),
},
{
args: []string{
"--test-proto-msgs", "io.prometheus.write.v2.Request",
"--test-proto-msgs", "prometheus.WriteRequest",
},
expected: config.RemoteWriteProtoMsgs{config.RemoteWriteProtoMsgV2, config.RemoteWriteProtoMsgV1},
},
{
args: []string{
"--test-proto-msgs", "io.prometheus.write.v2.Request",
"--test-proto-msgs", "prometheus.WriteRequest",
"--test-proto-msgs", "io.prometheus.write.v2.Request",
},
expectedErr: errors.New("duplicated io.prometheus.write.v2.Request flag value, got [io.prometheus.write.v2.Request prometheus.WriteRequest] already"),
},
} {
t.Run(strings.Join(tcase.args, ","), func(t *testing.T) {
a := kingpin.New("test", "")
var opt []config.RemoteWriteProtoMsg
a.Flag("test-proto-msgs", "").Default(defaultOpts.Strings()...).SetValue(rwProtoMsgFlagValue(&opt))
_, err := a.Parse(tcase.args)
if tcase.expectedErr != nil {
require.Error(t, err)
require.Equal(t, tcase.expectedErr, err)
} else {
require.NoError(t, err)
require.Equal(t, tcase.expected, opt)
}
})
}
}


@ -72,7 +72,7 @@ func (p *queryLogTest) waitForPrometheus() error {
var err error
for x := 0; x < 20; x++ {
var r *http.Response
if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == 200 {
if r, err = http.Get(fmt.Sprintf("http://%s:%d%s/-/ready", p.host, p.port, p.prefix)); err == nil && r.StatusCode == http.StatusOK {
break
}
time.Sleep(500 * time.Millisecond)
@ -296,7 +296,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
require.Equal(t, 1, qc)
} else {
require.Greater(t, qc, 0, "no queries logged")
require.Positive(t, qc, "no queries logged")
}
p.validateLastQuery(t, ql)
@ -366,7 +366,7 @@ func (p *queryLogTest) run(t *testing.T) {
if p.exactQueryCount() {
require.Equal(t, 1, qc)
} else {
require.Greater(t, qc, 0, "no queries logged")
require.Positive(t, qc, "no queries logged")
}
}


@ -22,6 +22,7 @@ import (
"time"
"github.com/go-kit/log"
"github.com/oklog/ulid"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse"
@ -88,7 +89,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
blockDuration := getCompatibleBlockDuration(maxBlockDuration)
mint = blockDuration * (mint / blockDuration)
db, err := tsdb.OpenDBReadOnly(outputDir, nil)
db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
if err != nil {
return err
}
@ -191,6 +192,10 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
if quiet {
break
}
// Empty block, don't print.
if block.Compare(ulid.ULID{}) == 0 {
break
}
blocks, err := db.Blocks()
if err != nil {
return fmt.Errorf("get blocks: %w", err)


@ -56,8 +56,8 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier"
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/util/documentcli"
)
@ -235,12 +235,14 @@ func main() {
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.")
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
@ -377,7 +379,7 @@ func main() {
case testRulesCmd.FullCommand():
os.Exit(RulesUnitTest(
promql.LazyLoaderOpts{
promqltest.LazyLoaderOpts{
EnableAtModifier: true,
EnableNegativeOffset: true,
},
@ -396,9 +398,9 @@ func main() {
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
case tsdbDumpCmd.FullCommand():
os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
case tsdbDumpOpenMetricsCmd.FullCommand():
os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
// TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))


@ -25,6 +25,7 @@ import (
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
} {
t.Run(c.file, func(t *testing.T) {
for _, lintFatal := range []bool{true, false} {
t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) {
t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
args := []string{"-test.main", "check", "config", "testdata/" + c.file}
if lintFatal {
args = append(args, "--lint-fatal")


@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
}
func listBlocks(path string, humanReadable bool) error {
db, err := tsdb.OpenDBReadOnly(path, nil)
db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil {
return err
}
@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
}
func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
db, err := tsdb.OpenDBReadOnly(path, nil)
db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil {
return nil, nil, err
}
@ -708,8 +708,8 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
type SeriesSetFormatter func(series storage.SeriesSet) error
func dumpSamples(ctx context.Context, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
db, err := tsdb.OpenDBReadOnly(path, nil)
func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
if err != nil {
return err
}
@ -838,6 +838,10 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
}
func displayHistogram(dataType string, datas []int, total int) {
if len(datas) == 0 {
fmt.Printf("%s: N/A\n\n", dataType)
return
}
slices.Sort(datas)
start, end, step := generateBucket(datas[0], datas[len(datas)-1])
sum := 0
@ -852,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
}
avg := sum / len(datas)
fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end)))
maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step)))
maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount)))
maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
for bucket, count := range buckets {
percentage := 100.0 * count / total
fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))


@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/tsdb"
)
@ -64,6 +64,7 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin
err := dumpSamples(
context.Background(),
path,
t.TempDir(),
mint,
maxt,
match,
@ -88,7 +89,7 @@ func normalizeNewLine(b []byte) []byte {
}
func TestTSDBDump(t *testing.T) {
storage := promql.LoadedStorage(t, `
storage := promqltest.LoadedStorage(t, `
load 1m
metric{foo="bar", baz="abc"} 1 2 3 4 5
heavy_metric{foo="bar"} 5 4 3 2 1
@ -158,7 +159,7 @@ func TestTSDBDump(t *testing.T) {
}
func TestTSDBDumpOpenMetrics(t *testing.T) {
storage := promql.LoadedStorage(t, `
storage := promqltest.LoadedStorage(t, `
load 1m
my_counter{foo="bar", baz="abc"} 1 2 3 4 5
my_gauge{bar="foo", abc="baz"} 9 8 0 4 7


@ -36,13 +36,14 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage"
)
// RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
failed := false
var run *regexp.Regexp
@ -69,7 +70,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, diffFla
return successExitCode
}
func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
fmt.Println("Unit Testing: ", filename)
b, err := os.ReadFile(filename)
@ -175,9 +176,9 @@ type testGroup struct {
}
// test performs the unit tests.
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
// Setup testing suite.
suite, err := promql.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil {
return []error{err}
}
@ -413,7 +414,7 @@ Outer:
gotSamples = append(gotSamples, parsedSample{
Labels: s.Metric.Copy(),
Value: s.F,
Histogram: promql.HistogramTestExpression(s.H),
Histogram: promqltest.HistogramTestExpression(s.H),
})
}
@ -443,7 +444,7 @@ Outer:
expSamples = append(expSamples, parsedSample{
Labels: lb,
Value: s.Value,
Histogram: promql.HistogramTestExpression(hist),
Histogram: promqltest.HistogramTestExpression(hist),
})
}
@ -572,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
}
s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
for i, l := range la[1:] {
s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ")
s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
}
s += "\n]"


@ -18,7 +18,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/promqltest"
)
func TestRulesUnitTest(t *testing.T) {
@ -28,7 +28,7 @@ func TestRulesUnitTest(t *testing.T) {
tests := []struct {
name string
args args
queryOpts promql.LazyLoaderOpts
queryOpts promqltest.LazyLoaderOpts
want int
}{
{
@ -92,7 +92,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/at-modifier-test.yml"},
},
queryOpts: promql.LazyLoaderOpts{
queryOpts: promqltest.LazyLoaderOpts{
EnableAtModifier: true,
},
want: 0,
@ -109,7 +109,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/negative-offset-test.yml"},
},
queryOpts: promql.LazyLoaderOpts{
queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
},
want: 0,
@ -119,7 +119,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{
files: []string{"./testdata/no-test-group-interval.yml"},
},
queryOpts: promql.LazyLoaderOpts{
queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true,
},
want: 0,
@ -142,7 +142,7 @@ func TestRulesUnitTestRun(t *testing.T) {
tests := []struct {
name string
args args
queryOpts promql.LazyLoaderOpts
queryOpts promqltest.LazyLoaderOpts
want int
}{
{


@ -20,6 +20,7 @@ import (
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
@ -145,11 +146,17 @@ var (
ScrapeInterval: model.Duration(1 * time.Minute),
ScrapeTimeout: model.Duration(10 * time.Second),
EvaluationInterval: model.Duration(1 * time.Minute),
RuleQueryOffset: model.Duration(0 * time.Minute),
// When native histogram feature flag is enabled, ScrapeProtocols default
// changes to DefaultNativeHistogramScrapeProtocols.
ScrapeProtocols: DefaultScrapeProtocols,
}
DefaultRuntimeConfig = RuntimeConfig{
// Go runtime tuning.
GoGC: 75,
}
// DefaultScrapeConfig is the default scrape configuration.
DefaultScrapeConfig = ScrapeConfig{
// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
@ -173,6 +180,7 @@ var (
// DefaultRemoteWriteConfig is the default remote write configuration.
DefaultRemoteWriteConfig = RemoteWriteConfig{
RemoteTimeout: model.Duration(30 * time.Second),
ProtobufMessage: RemoteWriteProtoMsgV1,
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
HTTPClientConfig: config.DefaultHTTPClientConfig,
@ -224,6 +232,7 @@ var (
// Config is the top-level configuration for Prometheus's config files.
type Config struct {
GlobalConfig GlobalConfig `yaml:"global"`
Runtime RuntimeConfig `yaml:"runtime,omitempty"`
AlertingConfig AlertingConfig `yaml:"alerting,omitempty"`
RuleFiles []string `yaml:"rule_files,omitempty"`
ScrapeConfigFiles []string `yaml:"scrape_config_files,omitempty"`
@ -271,7 +280,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
jobNames := map[string]string{}
for i, scfg := range c.ScrapeConfigs {
// We do these checks for library users that would not call Validate in
// We do these checks for library users that would not call validate in
// Unmarshal.
if err := scfg.Validate(c.GlobalConfig); err != nil {
return nil, err
@ -334,6 +343,14 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
c.GlobalConfig = DefaultGlobalConfig
}
// If a runtime block was open but empty the default runtime config is overwritten.
// We have to restore it here.
if c.Runtime.isZero() {
c.Runtime = DefaultRuntimeConfig
// Use the GOGC env var value if the runtime section is empty.
c.Runtime.GoGC = getGoGCEnv()
}
for _, rf := range c.RuleFiles {
if !patRulePath.MatchString(rf) {
return fmt.Errorf("invalid rule file path %q", rf)
@ -397,6 +414,8 @@ type GlobalConfig struct {
ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
// How frequently to evaluate rules by default.
EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"`
// Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
// File to which PromQL queries are logged.
QueryLogFile string `yaml:"query_log_file,omitempty"`
// The labels to add to any timeseries that this Prometheus instance scrapes.
@ -556,10 +575,22 @@ func (c *GlobalConfig) isZero() bool {
c.ScrapeInterval == 0 &&
c.ScrapeTimeout == 0 &&
c.EvaluationInterval == 0 &&
c.RuleQueryOffset == 0 &&
c.QueryLogFile == "" &&
c.ScrapeProtocols == nil
}
// RuntimeConfig configures the values for the process behavior.
type RuntimeConfig struct {
// The Go garbage collection target percentage.
GoGC int `yaml:"gogc,omitempty"`
}
// isZero returns true iff the runtime config is the zero value.
func (c *RuntimeConfig) isZero() bool {
return c.GoGC == 0
}
type ScrapeConfigs struct {
ScrapeConfigs []*ScrapeConfig `yaml:"scrape_configs,omitempty"`
}
@ -1025,6 +1056,49 @@ func CheckTargetAddress(address model.LabelValue) error {
return nil
}
// RemoteWriteProtoMsg represents the known protobuf message for the remote write
// 1.0 and 2.0 specs.
type RemoteWriteProtoMsg string
// Validate returns error if the given reference for the protobuf message is not supported.
func (s RemoteWriteProtoMsg) Validate() error {
switch s {
case RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2:
return nil
default:
return fmt.Errorf("unknown remote write protobuf message %v, supported: %v", s, RemoteWriteProtoMsgs{RemoteWriteProtoMsgV1, RemoteWriteProtoMsgV2}.String())
}
}
type RemoteWriteProtoMsgs []RemoteWriteProtoMsg
func (m RemoteWriteProtoMsgs) Strings() []string {
ret := make([]string, 0, len(m))
for _, typ := range m {
ret = append(ret, string(typ))
}
return ret
}
func (m RemoteWriteProtoMsgs) String() string {
return strings.Join(m.Strings(), ", ")
}
var (
// RemoteWriteProtoMsgV1 represents the deprecated `prometheus.WriteRequest` protobuf
// message introduced in https://prometheus.io/docs/specs/remote_write_spec/.
//
// NOTE: This string is used for both HTTP header values and the config value, so don't change
// this reference.
RemoteWriteProtoMsgV1 RemoteWriteProtoMsg = "prometheus.WriteRequest"
// RemoteWriteProtoMsgV2 represents the `io.prometheus.write.v2.Request` protobuf
// message introduced in https://prometheus.io/docs/specs/remote_write_spec_2_0/.
//
// NOTE: This string is used for both HTTP header values and the config value, so don't change
// this reference.
RemoteWriteProtoMsgV2 RemoteWriteProtoMsg = "io.prometheus.write.v2.Request"
)
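For orientation, a minimal remote_write entry selecting the 2.0 message could look like the sketch below; the endpoint URL is a placeholder, and omitting protobuf_message falls back to the default prometheus.WriteRequest.

remote_write:
  - url: https://receiver.example/api/v1/write   # placeholder endpoint
    protobuf_message: io.prometheus.write.v2.Request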
// RemoteWriteConfig is the configuration for writing to remote storage.
type RemoteWriteConfig struct {
URL *config.URL `yaml:"url"`
@ -1034,6 +1108,9 @@ type RemoteWriteConfig struct {
Name string `yaml:"name,omitempty"`
SendExemplars bool `yaml:"send_exemplars,omitempty"`
SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"`
// ProtobufMessage specifies the protobuf message to use against the remote
// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
// We cannot do proper Go type embedding below as the parser will then parse
// values arbitrarily into the overflow maps of further-down types.
@ -1068,6 +1145,10 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
return err
}
if err := c.ProtobufMessage.Validate(); err != nil {
return fmt.Errorf("invalid protobuf_message value: %w", err)
}
// The UnmarshalYAML method of HTTPClientConfig is not being called because it's not a pointer.
// We cannot make it a pointer as the parser panics for inlined pointer structs.
// Thus we just do its validation here.
@ -1207,3 +1288,19 @@ func filePath(filename string) string {
func fileErr(filename string, err error) error {
return fmt.Errorf("%q: %w", filePath(filename), err)
}
func getGoGCEnv() int {
goGCEnv := os.Getenv("GOGC")
// If the GOGC env var is set, use the same logic as upstream Go.
if goGCEnv != "" {
// Special case for GOGC=off.
if strings.ToLower(goGCEnv) == "off" {
return -1
}
i, err := strconv.Atoi(goGCEnv)
if err == nil {
return i
}
}
return DefaultRuntimeConfig.GoGC
}
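As a sketch of how the new knobs above fit into a configuration file (the values are illustrative, not defaults):

global:
  rule_query_offset: 1m   # delay rule evaluation queries by 1m; illustrative value
runtime:
  gogc: 42                # Go GC target percentage; if this block is empty, the GOGC env var (or the built-in default) is used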

View file

@ -19,6 +19,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"
var ruleFilesExpectedConf = &Config{
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
RuleFiles: []string{
"testdata/first.rules",
"testdata/rules/second.rules",

View file

@ -76,6 +76,7 @@ const (
globLabelLimit = 30
globLabelNameLengthLimit = 200
globLabelValueLengthLimit = 200
globalGoGC = 42
)
var expectedConf = &Config{
@ -96,6 +97,10 @@ var expectedConf = &Config{
ScrapeProtocols: DefaultGlobalConfig.ScrapeProtocols,
},
Runtime: RuntimeConfig{
GoGC: globalGoGC,
},
RuleFiles: []string{
filepath.FromSlash("testdata/first.rules"),
filepath.FromSlash("testdata/my/*.rules"),
@ -104,6 +109,7 @@ var expectedConf = &Config{
RemoteWriteConfigs: []*RemoteWriteConfig{
{
URL: mustParseURL("http://remote1/push"),
ProtobufMessage: RemoteWriteProtoMsgV1,
RemoteTimeout: model.Duration(30 * time.Second),
Name: "drop_expensive",
WriteRelabelConfigs: []*relabel.Config{
@ -133,6 +139,7 @@ var expectedConf = &Config{
},
{
URL: mustParseURL("http://remote2/push"),
ProtobufMessage: RemoteWriteProtoMsgV2,
RemoteTimeout: model.Duration(30 * time.Second),
QueueConfig: DefaultQueueConfig,
MetadataConfig: DefaultMetadataConfig,
@ -993,6 +1000,7 @@ var expectedConf = &Config{
HostNetworkingHost: "localhost",
RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
MatchFirstNetwork: true,
},
},
},
@ -1794,6 +1802,10 @@ var expectedErrors = []struct {
filename: "remote_write_authorization_header.bad.yml",
errMsg: `authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter`,
},
{
filename: "remote_write_wrong_msg.bad.yml",
errMsg: `invalid protobuf_message value: unknown remote write protobuf message io.prometheus.writet.v2.Request, supported: prometheus.WriteRequest, io.prometheus.write.v2.Request`,
},
{
filename: "remote_write_url_missing.bad.yml",
errMsg: `url for remote_write is empty`,
@ -2081,6 +2093,7 @@ func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n", false, log.NewNopLogger())
require.NoError(t, err)
exp := DefaultConfig
exp.Runtime = DefaultRuntimeConfig
require.Equal(t, exp, *c)
}

View file

@ -17,6 +17,7 @@ const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml"
var ruleFilesExpectedConf = &Config{
GlobalConfig: DefaultGlobalConfig,
Runtime: DefaultRuntimeConfig,
RuleFiles: []string{
"testdata\\first.rules",
"testdata\\rules\\second.rules",

View file

@ -14,6 +14,9 @@ global:
monitor: codelab
foo: bar
runtime:
gogc: 42
rule_files:
- "first.rules"
- "my/*.rules"
@ -34,6 +37,7 @@ remote_write:
key_file: valid_key_file
- url: http://remote2/push
protobuf_message: io.prometheus.write.v2.Request
name: rw_tls
tls_config:
cert_file: valid_cert_file

View file

@ -0,0 +1,3 @@
remote_write:
- url: localhost:9090
protobuf_message: io.prometheus.writet.v2.Request # typo in 'write'

View file

@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
@ -53,6 +54,7 @@ const (
ec2LabelInstanceType = ec2Label + "instance_type"
ec2LabelOwnerID = ec2Label + "owner_id"
ec2LabelPlatform = ec2Label + "platform"
ec2LabelPrimaryIPv6Addresses = ec2Label + "primary_ipv6_addresses"
ec2LabelPrimarySubnetID = ec2Label + "primary_subnet_id"
ec2LabelPrivateDNS = ec2Label + "private_dns_name"
ec2LabelPrivateIP = ec2Label + "private_ip"
@ -279,7 +281,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
if inst.PrivateDnsName != nil {
labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
}
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr)
if inst.Platform != nil {
@ -316,6 +318,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
var subnets []string
var ipv6addrs []string
var primaryipv6addrs []string
subnetsMap := make(map[string]struct{})
for _, eni := range inst.NetworkInterfaces {
if eni.SubnetId == nil {
@ -329,6 +332,15 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
for _, ipv6addr := range eni.Ipv6Addresses {
ipv6addrs = append(ipv6addrs, *ipv6addr.Ipv6Address)
if *ipv6addr.IsPrimaryIpv6 {
// We might have to extend the slice with more than one element.
// That can leave empty strings in the list, which is intentional:
// it preserves the position/device index information.
for int64(len(primaryipv6addrs)) <= *eni.Attachment.DeviceIndex {
primaryipv6addrs = append(primaryipv6addrs, "")
}
primaryipv6addrs[*eni.Attachment.DeviceIndex] = *ipv6addr.Ipv6Address
}
}
}
labels[ec2LabelSubnetID] = model.LabelValue(
@ -341,6 +353,12 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
strings.Join(ipv6addrs, ec2LabelSeparator) +
ec2LabelSeparator)
}
if len(primaryipv6addrs) > 0 {
labels[ec2LabelPrimaryIPv6Addresses] = model.LabelValue(
ec2LabelSeparator +
strings.Join(primaryipv6addrs, ec2LabelSeparator) +
ec2LabelSeparator)
}
}
for _, t := range inst.Tags {

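To make the padding behaviour above concrete, here is a small standalone sketch; the comma separator and the sample address are assumptions for illustration only:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assume the only primary IPv6 address sits on the ENI attached at device index 2.
	type attachment struct {
		deviceIndex int
		primaryIPv6 string
	}
	attachments := []attachment{{deviceIndex: 2, primaryIPv6: "2001:db8::1"}}

	var primary []string
	for _, a := range attachments {
		// Grow the slice until the address can land at its device index; the gaps stay "".
		for len(primary) <= a.deviceIndex {
			primary = append(primary, "")
		}
		primary[a.deviceIndex] = a.primaryIPv6
	}
	// Prints ",,,2001:db8::1,"; the empty slots preserve the device index positions.
	fmt.Println("," + strings.Join(primary, ",") + ",")
}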
View file

@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"time"
@ -229,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
lightsailLabelRegion: model.LabelValue(d.cfg.Region),
}
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port))
addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr)
if inst.PublicIpAddress != nil {

View file

@ -20,6 +20,7 @@ import (
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"sync"
"time"
@ -492,7 +493,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
}
if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(address)
return labels, nil
}

View file

@ -539,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
// since the service may be registered remotely through a different node.
var addr string
if serviceNode.Service.Address != "" {
addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
} else {
addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
}
labels := model.LabelSet{

View file

@ -177,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
}
labels := model.LabelSet{
doLabelID: model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
doLabelID: model.LabelValue(strconv.Itoa(droplet.ID)),
doLabelName: model.LabelValue(droplet.Name),
doLabelImage: model.LabelValue(droplet.Image.Slug),
doLabelImageName: model.LabelValue(droplet.Image.Name),

View file

@ -18,6 +18,7 @@ import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
@ -200,7 +201,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
tg := &targetgroup.Group{}
hostPort := func(a string, p int) model.LabelValue {
return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
}
for _, record := range response.Answer {
@ -209,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
switch addr := record.(type) {
case *dns.SRV:
dnsSrvRecordTarget = model.LabelValue(addr.Target)
dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))
// Remove the final dot from rooted DNS names to make them look more usual.
addr.Target = strings.TrimRight(addr.Target, ".")

View file

@ -97,6 +97,7 @@ func fetchApps(ctx context.Context, server string, client *http.Client) (*Applic
resp.Body.Close()
}()
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during eureka service discovery", resp.StatusCode)
}

View file

@ -15,7 +15,6 @@ package hetzner
import (
"context"
"fmt"
"net"
"net/http"
"strconv"
@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
for i, server := range servers {
labels := model.LabelSet{
hetznerLabelRole: model.LabelValue(HetznerRoleHcloud),
hetznerLabelServerID: model.LabelValue(fmt.Sprintf("%d", server.ID)),
hetznerLabelServerID: model.LabelValue(strconv.FormatInt(server.ID, 10)),
hetznerLabelServerName: model.LabelValue(server.Name),
hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
hetznerLabelHcloudDatacenterLocation: model.LabelValue(server.Datacenter.Location.Name),
hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
hetznerLabelHcloudType: model.LabelValue(server.ServerType.Name),
hetznerLabelHcloudCPUCores: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
hetznerLabelHcloudCPUCores: model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
hetznerLabelHcloudCPUType: model.LabelValue(server.ServerType.CPUType),
hetznerLabelHcloudMemoryGB: model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
hetznerLabelHcloudMemoryGB: model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
hetznerLabelHcloudDiskGB: model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
}

View file

@ -87,6 +87,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
resp.Body.Close()
}()
//nolint:usestdlibvars
if resp.StatusCode/100 != 2 {
return nil, fmt.Errorf("non 2xx status '%d' response during hetzner service discovery with role robot", resp.StatusCode)
}
@ -112,7 +113,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
hetznerLabelPublicIPv4: model.LabelValue(server.Server.ServerIP),
hetznerLabelServerStatus: model.LabelValue(server.Server.Status),
hetznerLabelRobotProduct: model.LabelValue(server.Server.Product),
hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
}

View file

@ -720,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
var cfg discovery.StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Source: strconv.Itoa(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},

View file

@ -186,12 +186,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
if d.lastResults != nil && d.eventPollingEnabled {
// Check to see if there have been any events. If so, refresh our data.
opts := linodego.ListOptions{
eventsOpts := linodego.ListOptions{
PageOptions: &linodego.PageOptions{Page: 1},
PageSize: 25,
Filter: fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")),
}
events, err := d.client.ListEvents(ctx, &opts)
events, err := d.client.ListEvents(ctx, &eventsOpts)
if err != nil {
var e *linodego.Error
if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
@ -232,31 +232,40 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
tg := &targetgroup.Group{
Source: "Linode",
}
opts := linodego.ListOptions{
// We need three separate ListOptions because the linodego client writes into the structure during pagination.
listInstancesOpts := linodego.ListOptions{
PageSize: 500,
}
listIPAddressesOpts := linodego.ListOptions{
PageSize: 500,
}
listIPv6RangesOpts := linodego.ListOptions{
PageSize: 500,
}
// If a region filter is provided, use it to constrain results.
if d.region != "" {
opts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
listInstancesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
listIPAddressesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
listIPv6RangesOpts.Filter = fmt.Sprintf(regionFilterTemplate, d.region)
}
// Gather all linode instances.
instances, err := d.client.ListInstances(ctx, &opts)
instances, err := d.client.ListInstances(ctx, &listInstancesOpts)
if err != nil {
d.metrics.failuresCount.Inc()
return nil, err
}
// Gather detailed IP address info for all IPs on all linode instances.
detailedIPs, err := d.client.ListIPAddresses(ctx, &opts)
detailedIPs, err := d.client.ListIPAddresses(ctx, &listIPAddressesOpts)
if err != nil {
d.metrics.failuresCount.Inc()
return nil, err
}
// Gather detailed IPv6 Range info for all linode instances.
ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &opts)
ipv6RangeList, err := d.client.ListIPv6Ranges(ctx, &listIPv6RangesOpts)
if err != nil {
d.metrics.failuresCount.Inc()
return nil, err
@ -325,7 +334,7 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
}
labels := model.LabelSet{
linodeLabelID: model.LabelValue(fmt.Sprintf("%d", instance.ID)),
linodeLabelID: model.LabelValue(strconv.Itoa(instance.ID)),
linodeLabelName: model.LabelValue(instance.Label),
linodeLabelImage: model.LabelValue(instance.Image),
linodeLabelPrivateIPv4: model.LabelValue(privateIPv4),
@ -338,13 +347,13 @@ func (d *Discovery) refreshData(ctx context.Context) ([]*targetgroup.Group, erro
linodeLabelType: model.LabelValue(instance.Type),
linodeLabelStatus: model.LabelValue(instance.Status),
linodeLabelGroup: model.LabelValue(instance.Group),
linodeLabelGPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.GPUs)),
linodeLabelGPUs: model.LabelValue(strconv.Itoa(instance.Specs.GPUs)),
linodeLabelHypervisor: model.LabelValue(instance.Hypervisor),
linodeLabelBackups: model.LabelValue(backupsStatus),
linodeLabelSpecsDiskBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Disk)<<20)),
linodeLabelSpecsMemoryBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Memory)<<20)),
linodeLabelSpecsVCPUs: model.LabelValue(fmt.Sprintf("%d", instance.Specs.VCPUs)),
linodeLabelSpecsTransferBytes: model.LabelValue(fmt.Sprintf("%d", int64(instance.Specs.Transfer)<<20)),
linodeLabelSpecsDiskBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Disk)<<20, 10)),
linodeLabelSpecsMemoryBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Memory)<<20, 10)),
linodeLabelSpecsVCPUs: model.LabelValue(strconv.Itoa(instance.Specs.VCPUs)),
linodeLabelSpecsTransferBytes: model.LabelValue(strconv.FormatInt(int64(instance.Specs.Transfer)<<20, 10)),
}
addr := net.JoinHostPort(publicIPv4, strconv.FormatUint(uint64(d.port), 10))

View file

@ -120,6 +120,16 @@ func Name(n string) func(*Manager) {
}
}
// Updatert sets the update interval (updatert) of the manager.
// It is used to speed up tests.
func Updatert(u time.Duration) func(*Manager) {
return func(m *Manager) {
m.mtx.Lock()
defer m.mtx.Unlock()
m.updatert = u
}
}
// HTTPClientOptions sets the list of HTTP client options to expose to
// Discoverers. It is up to Discoverers to choose to use the options provided.
func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {

View file

@ -720,7 +720,7 @@ func staticConfig(addrs ...string) StaticConfig {
var cfg StaticConfig
for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i),
Source: strconv.Itoa(i),
Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)},
},

View file

@ -505,7 +505,7 @@ func targetEndpoint(task *task, port uint32, containerNet bool) string {
host = task.Host
}
return net.JoinHostPort(host, fmt.Sprintf("%d", port))
return net.JoinHostPort(host, strconv.Itoa(int(port)))
}
// Get a list of ports and a list of labels from a PortMapping.

View file

@ -19,16 +19,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)
func init() {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
}
// Metrics to be used with a discovery manager.
type Metrics struct {
FailedConfigs prometheus.Gauge

View file

@ -35,6 +35,11 @@ const (
workqueueMetricsNamespace = KubernetesMetricsNamespace + "_workqueue"
)
var (
clientGoRequestMetrics = &clientGoRequestMetricAdapter{}
clientGoWorkloadMetrics = &clientGoWorkqueueMetricsProvider{}
)
var (
// Metrics for client-go's HTTP requests.
clientGoRequestResultMetricVec = prometheus.NewCounterVec(
@ -135,6 +140,9 @@ func clientGoMetrics() []prometheus.Collector {
}
func RegisterK8sClientMetricsWithPrometheus(registerer prometheus.Registerer) error {
clientGoRequestMetrics.RegisterWithK8sGoClient()
clientGoWorkloadMetrics.RegisterWithK8sGoClient()
for _, collector := range clientGoMetrics() {
err := registerer.Register(collector)
if err != nil {

View file

@ -22,8 +22,10 @@ import (
"strconv"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
@ -58,6 +60,7 @@ var DefaultDockerSDConfig = DockerSDConfig{
Filters: []Filter{},
HostNetworkingHost: "localhost",
HTTPClientConfig: config.DefaultHTTPClientConfig,
MatchFirstNetwork: true,
}
func init() {
@ -74,6 +77,7 @@ type DockerSDConfig struct {
HostNetworkingHost string `yaml:"host_networking_host"`
RefreshInterval model.Duration `yaml:"refresh_interval"`
MatchFirstNetwork bool `yaml:"match_first_network"`
}
// NewDiscovererMetrics implements discovery.Config.
@ -119,6 +123,7 @@ type DockerDiscovery struct {
port int
hostNetworkingHost string
filters filters.Args
matchFirstNetwork bool
}
// NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
@ -131,6 +136,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discove
d := &DockerDiscovery{
port: conf.Port,
hostNetworkingHost: conf.HostNetworkingHost,
matchFirstNetwork: conf.MatchFirstNetwork,
}
hostURL, err := url.Parse(conf.Host)
@ -202,6 +208,11 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
return nil, fmt.Errorf("error while computing network labels: %w", err)
}
allContainers := make(map[string]types.Container)
for _, c := range containers {
allContainers[c.ID] = c
}
for _, c := range containers {
if len(c.Names) == 0 {
continue
@ -218,7 +229,50 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
commonLabels[dockerLabelContainerLabelPrefix+ln] = v
}
for _, n := range c.NetworkSettings.Networks {
networks := c.NetworkSettings.Networks
containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
if len(networks) == 0 {
// The container exposes no networks of its own; try to look up networks shared through its container network mode.
for {
if containerNetworkMode.IsContainer() {
tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
if !exists {
break
}
networks = tmpContainer.NetworkSettings.Networks
containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
if len(networks) > 0 {
break
}
} else {
break
}
}
}
if d.matchFirstNetwork && len(networks) > 1 {
// Match the user-defined network.
if containerNetworkMode.IsUserDefined() {
networkMode := string(containerNetworkMode)
networks = map[string]*network.EndpointSettings{networkMode: networks[networkMode]}
} else {
// Get the first network if the container network mode has the "none" value.
// This case appears under the following conditions:
// 1. The container was created with "--net=none".
// 2. The "none" network was then disconnected.
// 3. The container was reconnected to a user-defined network.
var first string
for k, n := range networks {
if n != nil {
first = k
break
}
}
networks = map[string]*network.EndpointSettings{first: networks[first]}
}
}
for _, n := range networks {
var added bool
for _, p := range c.Ports {

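A hedged docker_sd_configs sketch exercising the new option (the job name and socket path are placeholders); match_first_network defaults to true, so setting it to false yields targets for every attached network:

scrape_configs:
  - job_name: docker          # placeholder job name
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        match_first_network: false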
View file

@ -16,6 +16,7 @@ package moby
import (
"context"
"fmt"
"sort"
"testing"
"github.com/go-kit/log"
@ -59,7 +60,7 @@ host: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 3)
require.Len(t, tg.Targets, 6)
for i, lbls := range []model.LabelSet{
{
@ -113,9 +114,259 @@ host: %s
"__meta_docker_container_network_mode": "host",
"__meta_docker_network_ip": "",
},
{
"__address__": "172.20.0.2:3306",
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_mysql",
"__meta_docker_container_network_mode": "dockersd_private",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.2",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "3306",
},
{
"__address__": "172.20.0.2:33060",
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_mysql",
"__meta_docker_container_network_mode": "dockersd_private",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.2",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "33060",
},
{
"__address__": "172.20.0.2:9104",
"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_name": "/dockersd_mysql_exporter",
"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.2",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104",
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])
})
}
}
func TestDockerSDRefreshMatchAllNetworks(t *testing.T) {
sdmock := NewSDMock(t, "dockerprom")
sdmock.Setup()
e := sdmock.Endpoint()
url := e[:len(e)-1]
cfgString := fmt.Sprintf(`
---
host: %s
`, url)
var cfg DockerSDConfig
require.NoError(t, yaml.Unmarshal([]byte(cfgString), &cfg))
cfg.MatchFirstNetwork = false
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
defer metrics.Unregister()
defer refreshMetrics.Unregister()
d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
require.NoError(t, err)
ctx := context.Background()
tgs, err := d.refresh(ctx)
require.NoError(t, err)
require.Len(t, tgs, 1)
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 9)
sortFunc := func(labelSets []model.LabelSet) {
sort.Slice(labelSets, func(i, j int) bool {
return labelSets[i]["__address__"] < labelSets[j]["__address__"]
})
}
expected := []model.LabelSet{
{
"__address__": "172.19.0.2:9100",
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "node",
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_label_prometheus_job": "node",
"__meta_docker_container_name": "/dockersd_node_1",
"__meta_docker_container_network_mode": "dockersd_default",
"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.19.0.2",
"__meta_docker_network_label_com_docker_compose_network": "default",
"__meta_docker_network_label_com_docker_compose_project": "dockersd",
"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
"__meta_docker_network_name": "dockersd_default",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9100",
},
{
"__address__": "172.19.0.3:80",
"__meta_docker_container_id": "c301b928faceb1a18fe379f6bc178727ef920bb30b0f9b8592b32b36255a0eca",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "noport",
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_label_prometheus_job": "noport",
"__meta_docker_container_name": "/dockersd_noport_1",
"__meta_docker_container_network_mode": "dockersd_default",
"__meta_docker_network_id": "7189986ab399e144e52a71b7451b4e04e2158c044b4cd2f3ae26fc3a285d3798",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.19.0.3",
"__meta_docker_network_label_com_docker_compose_network": "default",
"__meta_docker_network_label_com_docker_compose_project": "dockersd",
"__meta_docker_network_label_com_docker_compose_version": "1.25.0",
"__meta_docker_network_name": "dockersd_default",
"__meta_docker_network_scope": "local",
},
{
"__address__": "localhost",
"__meta_docker_container_id": "54ed6cc5c0988260436cb0e739b7b6c9cad6c439a93b4c4fdbe9753e1c94b189",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "host_networking",
"__meta_docker_container_label_com_docker_compose_version": "1.25.0",
"__meta_docker_container_name": "/dockersd_host_networking_1",
"__meta_docker_container_network_mode": "host",
"__meta_docker_network_ip": "",
},
{
"__address__": "172.20.0.2:3306",
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_mysql",
"__meta_docker_container_network_mode": "dockersd_private",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.2",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "3306",
},
{
"__address__": "172.20.0.2:33060",
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_mysql",
"__meta_docker_container_network_mode": "dockersd_private",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.2",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "33060",
},
{
"__address__": "172.21.0.2:3306",
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_mysql",
"__meta_docker_container_network_mode": "dockersd_private",
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.21.0.2",
"__meta_docker_network_name": "dockersd_private1",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "3306",
},
{
"__address__": "172.21.0.2:33060",
"__meta_docker_container_id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysql",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_name": "/dockersd_mysql",
"__meta_docker_container_network_mode": "dockersd_private",
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.21.0.2",
"__meta_docker_network_name": "dockersd_private1",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "33060",
},
{
"__address__": "172.21.0.2:9104",
"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_name": "/dockersd_mysql_exporter",
"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_network_id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.21.0.2",
"__meta_docker_network_name": "dockersd_private1",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104",
},
{
"__address__": "172.20.0.2:9104",
"__meta_docker_container_id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
"__meta_docker_container_label_com_docker_compose_project": "dockersd",
"__meta_docker_container_label_com_docker_compose_service": "mysqlexporter",
"__meta_docker_container_label_com_docker_compose_version": "2.2.2",
"__meta_docker_container_label_maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>",
"__meta_docker_container_name": "/dockersd_mysql_exporter",
"__meta_docker_container_network_mode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"__meta_docker_network_id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"__meta_docker_network_ingress": "false",
"__meta_docker_network_internal": "false",
"__meta_docker_network_ip": "172.20.0.2",
"__meta_docker_network_name": "dockersd_private",
"__meta_docker_network_scope": "local",
"__meta_docker_port_private": "9104",
},
}
sortFunc(expected)
sortFunc(tg.Targets)
for i, lbls := range expected {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])
})
}
}

View file

@ -15,7 +15,7 @@ package moby
import (
"context"
"fmt"
"strconv"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
@ -44,8 +44,8 @@ func getNetworksLabels(ctx context.Context, client *client.Client, labelPrefix s
labelPrefix + labelNetworkID: network.ID,
labelPrefix + labelNetworkName: network.Name,
labelPrefix + labelNetworkScope: network.Scope,
labelPrefix + labelNetworkInternal: fmt.Sprintf("%t", network.Internal),
labelPrefix + labelNetworkIngress: fmt.Sprintf("%t", network.Ingress),
labelPrefix + labelNetworkInternal: strconv.FormatBool(network.Internal),
labelPrefix + labelNetworkIngress: strconv.FormatBool(network.Ingress),
}
for k, v := range network.Labels {
ln := strutil.SanitizeLabelName(k)

View file

@ -66,7 +66,7 @@ func (d *Discovery) refreshNodes(ctx context.Context) ([]*targetgroup.Group, err
swarmLabelNodeAddress: model.LabelValue(n.Status.Addr),
}
if n.ManagerStatus != nil {
labels[swarmLabelNodeManagerLeader] = model.LabelValue(fmt.Sprintf("%t", n.ManagerStatus.Leader))
labels[swarmLabelNodeManagerLeader] = model.LabelValue(strconv.FormatBool(n.ManagerStatus.Leader))
labels[swarmLabelNodeManagerReachability] = model.LabelValue(n.ManagerStatus.Reachability)
labels[swarmLabelNodeManagerAddr] = model.LabelValue(n.ManagerStatus.Addr)
}

View file

@ -116,7 +116,7 @@ func (d *Discovery) refreshServices(ctx context.Context) ([]*targetgroup.Group,
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)

View file

@ -150,7 +150,7 @@ func (d *Discovery) refreshTasks(ctx context.Context) ([]*targetgroup.Group, err
labels[model.LabelName(k)] = model.LabelValue(v)
}
addr := net.JoinHostPort(ip.String(), fmt.Sprintf("%d", d.port))
addr := net.JoinHostPort(ip.String(), strconv.Itoa(d.port))
labels[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, labels)

View file

@ -128,5 +128,105 @@
}
},
"Mounts": []
},
{
"Id": "f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8",
"Names": [
"/dockersd_mysql"
],
"Image": "mysql:5.7.29",
"ImageID": "sha256:5d9483f9a7b21c87e0f5b9776c3e06567603c28c0062013eda127c968175f5e8",
"Command": "mysqld",
"Created": 1616273136,
"Ports": [
{
"PrivatePort": 3306,
"Type": "tcp"
},
{
"PrivatePort": 33060,
"Type": "tcp"
}
],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "mysql",
"com.docker.compose.version": "2.2.2"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "dockersd_private"
},
"NetworkSettings": {
"Networks": {
"dockersd_private": {
"IPAMConfig": null,
"Links": null,
"Aliases": null,
"NetworkID": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"EndpointID": "80f8a61b37701a9991bb98c75ddd23fd9b7c16b5575ca81343f6b44ff4a2a9d9",
"Gateway": "172.20.0.1",
"IPAddress": "172.20.0.2",
"IPPrefixLen": 16,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:14:00:0a",
"DriverOpts": null
},
"dockersd_private1": {
"IPAMConfig": {},
"Links": null,
"Aliases": [
"mysql",
"mysql",
"f9ade4b83199"
],
"NetworkID": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"EndpointID": "f80921d10e78c99a5907705aae75befea40c3d3e9f820e66ab392f7274be16b8",
"Gateway": "172.21.0.1",
"IPAddress": "172.21.0.2",
"IPPrefixLen": 24,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "02:42:ac:15:00:02",
"DriverOpts": null
}
}
},
"Mounts": []
},
{
"Id": "59bf76e8816af98856b90dd619c91027145ca501043b1c51756d03b085882e06",
"Names": [
"/dockersd_mysql_exporter"
],
"Image": "prom/mysqld-exporter:latest",
"ImageID": "sha256:121b8a7cd0525dd89aaec58ad7d34c3bb3714740e5a67daf6510ccf71ab219a9",
"Command": "/bin/mysqld_exporter",
"Created": 1616273136,
"Ports": [
{
"PrivatePort": 9104,
"Type": "tcp"
}
],
"Labels": {
"com.docker.compose.project": "dockersd",
"com.docker.compose.service": "mysqlexporter",
"com.docker.compose.version": "2.2.2",
"maintainer": "The Prometheus Authors <prometheus-developers@googlegroups.com>"
},
"State": "running",
"Status": "Up 40 seconds",
"HostConfig": {
"NetworkMode": "container:f9ade4b83199d6f83020b7c0bfd1e8281b19dbf9e6cef2cf89bc45c8f8d20fe8"
},
"NetworkSettings": {
"Networks": {}
},
"Mounts": []
}
]

View file

@ -111,5 +111,59 @@
"Containers": {},
"Options": {},
"Labels": {}
},
{
"Name": "dockersd_private",
"Id": "e804771e55254a360fdb70dfdd78d3610fdde231b14ef2f837a00ac1eeb9e601",
"Created": "2022-03-25T09:21:17.718370976+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.20.0.1/16"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
},
{
"Name": "dockersd_private1",
"Id": "bfcf66a6b64f7d518f009e34290dc3f3c66a08164257ad1afc3bd31d75f656e8",
"Created": "2022-03-25T09:21:17.718370976+08:00",
"Scope": "local",
"Driver": "bridge",
"EnableIPv6": false,
"IPAM": {
"Driver": "default",
"Options": null,
"Config": [
{
"Subnet": "172.21.0.1/16"
}
]
},
"Internal": false,
"Attachable": false,
"Ingress": false,
"ConfigFrom": {
"Network": ""
},
"ConfigOnly": false,
"Containers": {},
"Options": {},
"Labels": {}
}
]

View file

@ -17,6 +17,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/go-kit/log"
"github.com/gophercloud/gophercloud"
@ -72,7 +73,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
tg := &targetgroup.Group{
Source: fmt.Sprintf("OS_" + h.region),
Source: "OS_" + h.region,
}
// OpenStack API reference
// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
@ -84,7 +85,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
}
for _, hypervisor := range hypervisorList {
labels := model.LabelSet{}
addr := net.JoinHostPort(hypervisor.HostIP, fmt.Sprintf("%d", h.port))
addr := net.JoinHostPort(hypervisor.HostIP, strconv.Itoa(h.port))
labels[model.AddressLabel] = model.LabelValue(addr)
labels[openstackLabelHypervisorID] = model.LabelValue(hypervisor.ID)
labels[openstackLabelHypervisorHostName] = model.LabelValue(hypervisor.HypervisorHostname)

View file

@ -17,6 +17,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -120,7 +121,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
}
pager := servers.List(client, opts)
tg := &targetgroup.Group{
Source: fmt.Sprintf("OS_" + i.region),
Source: "OS_" + i.region,
}
err = pager.EachPage(func(page pagination.Page) (bool, error) {
if ctx.Err() != nil {
@ -145,12 +146,18 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
openstackLabelUserID: model.LabelValue(s.UserID),
}
flavorID, ok := s.Flavor["id"].(string)
if !ok {
level.Warn(i.logger).Log("msg", "Invalid type for flavor id, expected string")
flavorName, nameOk := s.Flavor["original_name"].(string)
// "original_name" is only available for microversion >= 2.47. It was added in favor of "id".
if !nameOk {
flavorID, idOk := s.Flavor["id"].(string)
if !idOk {
level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string")
continue
}
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
} else {
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorName)
}
imageID, ok := s.Image["id"].(string)
if ok {
@ -194,7 +201,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
lbls[openstackLabelPublicIP] = model.LabelValue(val)
}
addr = net.JoinHostPort(addr, fmt.Sprintf("%d", i.port))
addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
lbls[model.AddressLabel] = model.LabelValue(addr)
tg.Targets = append(tg.Targets, lbls)

View file

@ -84,7 +84,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
{
"__address__": model.LabelValue("10.0.0.31:0"),
"__meta_openstack_instance_flavor": model.LabelValue("1"),
"__meta_openstack_instance_flavor": model.LabelValue("m1.medium"),
"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682ba"),
"__meta_openstack_instance_image": model.LabelValue("f90f6034-2570-4974-8351-6b49732ef2eb"),
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
@ -96,7 +96,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
{
"__address__": model.LabelValue("10.0.0.33:0"),
"__meta_openstack_instance_flavor": model.LabelValue("4"),
"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
"__meta_openstack_instance_name": model.LabelValue("merp"),
@ -108,7 +108,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
},
{
"__address__": model.LabelValue("10.0.0.34:0"),
"__meta_openstack_instance_flavor": model.LabelValue("4"),
"__meta_openstack_instance_flavor": model.LabelValue("m1.small"),
"__meta_openstack_instance_id": model.LabelValue("9e5476bd-a4ec-4653-93d6-72c93aa682bb"),
"__meta_openstack_instance_status": model.LabelValue("ACTIVE"),
"__meta_openstack_instance_name": model.LabelValue("merp"),

View file

@ -427,13 +427,17 @@ const serverListBody = `
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
"flavor": {
"id": "1",
"links": [
{
"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
"rel": "bookmark"
"vcpus": 2,
"ram": 4096,
"disk": 0,
"ephemeral": 0,
"swap": 0,
"original_name": "m1.medium",
"extra_specs": {
"aggregate_instance_extra_specs:general": "true",
"hw:mem_page_size": "large",
"hw:vif_multiqueue_enabled": "true"
}
]
},
"id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba",
"security_groups": [
@ -498,13 +502,17 @@ const serverListBody = `
"OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000",
"OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack",
"flavor": {
"id": "4",
"links": [
{
"href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1",
"rel": "bookmark"
"vcpus": 2,
"ram": 4096,
"disk": 0,
"ephemeral": 0,
"swap": 0,
"original_name": "m1.small",
"extra_specs": {
"aggregate_instance_extra_specs:general": "true",
"hw:mem_page_size": "large",
"hw:vif_multiqueue_enabled": "true"
}
]
},
"id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb",
"security_groups": [

View file

@ -144,12 +144,12 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
model.InstanceLabel: model.LabelValue(server.Name),
dedicatedServerLabelPrefix + "state": model.LabelValue(server.State),
dedicatedServerLabelPrefix + "commercial_range": model.LabelValue(server.CommercialRange),
dedicatedServerLabelPrefix + "link_speed": model.LabelValue(fmt.Sprintf("%d", server.LinkSpeed)),
dedicatedServerLabelPrefix + "link_speed": model.LabelValue(strconv.Itoa(server.LinkSpeed)),
dedicatedServerLabelPrefix + "rack": model.LabelValue(server.Rack),
dedicatedServerLabelPrefix + "no_intervention": model.LabelValue(strconv.FormatBool(server.NoIntervention)),
dedicatedServerLabelPrefix + "os": model.LabelValue(server.Os),
dedicatedServerLabelPrefix + "support_level": model.LabelValue(server.SupportLevel),
dedicatedServerLabelPrefix + "server_id": model.LabelValue(fmt.Sprintf("%d", server.ServerID)),
dedicatedServerLabelPrefix + "server_id": model.LabelValue(strconv.FormatInt(server.ServerID, 10)),
dedicatedServerLabelPrefix + "reverse": model.LabelValue(server.Reverse),
dedicatedServerLabelPrefix + "datacenter": model.LabelValue(server.Datacenter),
dedicatedServerLabelPrefix + "name": model.LabelValue(server.Name),

View file

@ -66,7 +66,7 @@ endpoint: %s
_, err := createClient(&conf)
require.ErrorContains(t, err, "missing application key")
require.ErrorContains(t, err, "missing authentication information")
}
func TestParseIPs(t *testing.T) {

View file

@ -19,6 +19,7 @@ import (
"net/netip"
"net/url"
"path"
"strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@ -161,21 +162,21 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
model.InstanceLabel: model.LabelValue(server.Name),
vpsLabelPrefix + "offer": model.LabelValue(server.Model.Offer),
vpsLabelPrefix + "datacenter": model.LabelValue(fmt.Sprintf("%+v", server.Model.Datacenter)),
vpsLabelPrefix + "model_vcore": model.LabelValue(fmt.Sprintf("%d", server.Model.Vcore)),
vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(fmt.Sprintf("%d", server.Model.MaximumAdditionalIP)),
vpsLabelPrefix + "model_vcore": model.LabelValue(strconv.Itoa(server.Model.Vcore)),
vpsLabelPrefix + "maximum_additional_ip": model.LabelValue(strconv.Itoa(server.Model.MaximumAdditionalIP)),
vpsLabelPrefix + "version": model.LabelValue(server.Model.Version),
vpsLabelPrefix + "model_name": model.LabelValue(server.Model.Name),
vpsLabelPrefix + "disk": model.LabelValue(fmt.Sprintf("%d", server.Model.Disk)),
vpsLabelPrefix + "memory": model.LabelValue(fmt.Sprintf("%d", server.Model.Memory)),
vpsLabelPrefix + "disk": model.LabelValue(strconv.Itoa(server.Model.Disk)),
vpsLabelPrefix + "memory": model.LabelValue(strconv.Itoa(server.Model.Memory)),
vpsLabelPrefix + "zone": model.LabelValue(server.Zone),
vpsLabelPrefix + "display_name": model.LabelValue(server.DisplayName),
vpsLabelPrefix + "cluster": model.LabelValue(server.Cluster),
vpsLabelPrefix + "state": model.LabelValue(server.State),
vpsLabelPrefix + "name": model.LabelValue(server.Name),
vpsLabelPrefix + "netboot_mode": model.LabelValue(server.NetbootMode),
vpsLabelPrefix + "memory_limit": model.LabelValue(fmt.Sprintf("%d", server.MemoryLimit)),
vpsLabelPrefix + "memory_limit": model.LabelValue(strconv.Itoa(server.MemoryLimit)),
vpsLabelPrefix + "offer_type": model.LabelValue(server.OfferType),
vpsLabelPrefix + "vcore": model.LabelValue(fmt.Sprintf("%d", server.Vcore)),
vpsLabelPrefix + "vcore": model.LabelValue(strconv.Itoa(server.Vcore)),
vpsLabelPrefix + "ipv4": model.LabelValue(ipv4),
vpsLabelPrefix + "ipv6": model.LabelValue(ipv6),
}

View file

@ -237,7 +237,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
pdbLabelResource: model.LabelValue(resource.Resource),
pdbLabelType: model.LabelValue(resource.Type),
pdbLabelTitle: model.LabelValue(resource.Title),
pdbLabelExported: model.LabelValue(fmt.Sprintf("%t", resource.Exported)),
pdbLabelExported: model.LabelValue(strconv.FormatBool(resource.Exported)),
pdbLabelFile: model.LabelValue(resource.File),
pdbLabelEnvironment: model.LabelValue(resource.Environment),
}

View file

@ -174,20 +174,25 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
labels[instanceTagsLabel] = model.LabelValue(tags)
}
if server.IPv6 != nil {
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String())
addr := ""
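// Address precedence: IPv6 first, then public IPv4, then private IPv4; each later non-nil value overrides the previous one.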
if server.IPv6 != nil { //nolint:staticcheck
labels[instancePublicIPv6Label] = model.LabelValue(server.IPv6.Address.String()) //nolint:staticcheck
addr = server.IPv6.Address.String() //nolint:staticcheck
}
if server.PublicIP != nil {
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String())
if server.PublicIP != nil { //nolint:staticcheck
labels[instancePublicIPv4Label] = model.LabelValue(server.PublicIP.Address.String()) //nolint:staticcheck
addr = server.PublicIP.Address.String() //nolint:staticcheck
}
if server.PrivateIP != nil {
labels[instancePrivateIPv4Label] = model.LabelValue(*server.PrivateIP)
addr = *server.PrivateIP
}
addr := net.JoinHostPort(*server.PrivateIP, strconv.FormatUint(uint64(d.port), 10))
if addr != "" {
addr := net.JoinHostPort(addr, strconv.FormatUint(uint64(d.port), 10))
labels[model.AddressLabel] = model.LabelValue(addr)
targets = append(targets, labels)
}
}

View file

@ -60,7 +60,7 @@ api_url: %s
tg := tgs[0]
require.NotNil(t, tg)
require.NotNil(t, tg.Targets)
require.Len(t, tg.Targets, 2)
require.Len(t, tg.Targets, 3)
for i, lbls := range []model.LabelSet{
{
@ -110,6 +110,28 @@ api_url: %s
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "fr-par-1",
},
{
"__address__": "51.158.183.115:80",
"__meta_scaleway_instance_boot_type": "local",
"__meta_scaleway_instance_hostname": "routed-dualstack",
"__meta_scaleway_instance_id": "4904366a-7e26-4b65-b97b-6392c761247a",
"__meta_scaleway_instance_image_arch": "x86_64",
"__meta_scaleway_instance_image_id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"__meta_scaleway_instance_image_name": "Ubuntu 22.04 Jammy Jellyfish",
"__meta_scaleway_instance_location_cluster_id": "19",
"__meta_scaleway_instance_location_hypervisor_id": "1201",
"__meta_scaleway_instance_location_node_id": "24",
"__meta_scaleway_instance_name": "routed-dualstack",
"__meta_scaleway_instance_organization_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_project_id": "20b3d507-96ac-454c-a795-bc731b46b12f",
"__meta_scaleway_instance_public_ipv4": "51.158.183.115",
"__meta_scaleway_instance_region": "nl-ams",
"__meta_scaleway_instance_security_group_id": "984414da-9fc2-49c0-a925-fed6266fe092",
"__meta_scaleway_instance_security_group_name": "Default security group",
"__meta_scaleway_instance_status": "running",
"__meta_scaleway_instance_type": "DEV1-S",
"__meta_scaleway_instance_zone": "nl-ams-1",
},
} {
t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
require.Equal(t, lbls, tg.Targets[i])

View file

@ -216,6 +216,146 @@
"placement_group": null,
"private_nics": [],
"zone": "fr-par-1"
},
{
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack",
"arch": "x86_64",
"commercial_type": "DEV1-S",
"boot_type": "local",
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"hostname": "routed-dualstack",
"image": {
"id": "3e0a5b84-1d69-4993-8fa4-0d7df52d5160",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"project": "51b656e3-4865-41e8-adbc-0c45bdd780db",
"root_volume": {
"id": "13d945b9-5e78-4f9d-8ac4-c4bc2fa7c31a",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "unified",
"size": 10000000000
},
"extra_volumes": {},
"public": true,
"arch": "x86_64",
"creation_date": "2024-02-22T15:52:56.037007+00:00",
"modification_date": "2024-02-22T15:52:56.037007+00:00",
"default_bootscript": null,
"from_server": null,
"state": "available",
"tags": [],
"zone": "nl-ams-1"
},
"volumes": {
"0": {
"boot": false,
"id": "fe85c817-e67e-4e24-8f13-bde3e9f42620",
"name": "Ubuntu 22.04 Jammy Jellyfish",
"volume_type": "l_ssd",
"export_uri": null,
"organization": "20b3d507-96ac-454c-a795-bc731b46b12f",
"project": "20b3d507-96ac-454c-a795-bc731b46b12f",
"server": {
"id": "4904366a-7e26-4b65-b97b-6392c761247a",
"name": "routed-dualstack"
},
"size": 20000000000,
"state": "available",
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:50:14.019739+00:00",
"tags": [],
"zone": "nl-ams-1"
}
},
"tags": [],
"state": "running",
"protected": false,
"state_detail": "booted",
"public_ip": {
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
"public_ips": [
{
"id": "53f8f861-7a11-4b16-a4bc-fb8f4b4a11d0",
"address": "51.158.183.115",
"dynamic": false,
"gateway": "62.210.0.1",
"netmask": "32",
"family": "inet",
"provisioning_mode": "dhcp",
"tags": [],
"state": "attached",
"ipam_id": "ec3499ff-a664-49b7-818a-9fe4b95aee5e"
},
{
"id": "f52a8c81-0875-4aee-b96e-eccfc6bec367",
"address": "2001:bc8:1640:1568:dc00:ff:fe21:91b",
"dynamic": false,
"gateway": "fe80::dc00:ff:fe21:91c",
"netmask": "64",
"family": "inet6",
"provisioning_mode": "slaac",
"tags": [],
"state": "attached",
"ipam_id": "40d1e6ea-e932-42f9-8acb-55398bec7ad6"
}
],
"mac_address": "de:00:00:21:09:1b",
"routed_ip_enabled": true,
"ipv6": null,
"extra_networks": [],
"dynamic_ip_required": false,
"enable_ipv6": false,
"private_ip": null,
"creation_date": "2024-04-19T14:50:14.019739+00:00",
"modification_date": "2024-04-19T14:52:21.181670+00:00",
"bootscript": {
"id": "5a520dda-96d6-4ed2-acd1-1d526b6859fe",
"public": true,
"title": "x86_64 mainline 4.4.182 rev1",
"architecture": "x86_64",
"organization": "11111111-1111-4111-8111-111111111111",
"project": "11111111-1111-4111-8111-111111111111",
"kernel": "http://10.196.2.9/kernel/x86_64-mainline-lts-4.4-4.4.182-rev1/vmlinuz-4.4.182",
"dtb": "",
"initrd": "http://10.196.2.9/initrd/initrd-Linux-x86_64-v3.14.6.gz",
"bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
"default": true,
"zone": "nl-ams-1"
},
"security_group": {
"id": "984414da-9fc2-49c0-a925-fed6266fe092",
"name": "Default security group"
},
"location": {
"zone_id": "ams1",
"platform_id": "23",
"cluster_id": "19",
"hypervisor_id": "1201",
"node_id": "24"
},
"maintenances": [],
"allowed_actions": [
"poweroff",
"terminate",
"reboot",
"stop_in_place",
"backup"
],
"placement_group": null,
"private_nics": [],
"zone": "nl-ams-1"
}
]
}

View file

@ -20,6 +20,7 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
@ -269,7 +270,7 @@ func (d *Discovery) getEndpointLabels(
model.AddressLabel: model.LabelValue(addr),
uyuniLabelMinionHostname: model.LabelValue(networkInfo.Hostname),
uyuniLabelPrimaryFQDN: model.LabelValue(networkInfo.PrimaryFQDN),
uyuniLablelSystemID: model.LabelValue(fmt.Sprintf("%d", endpoint.SystemID)),
uyuniLablelSystemID: model.LabelValue(strconv.Itoa(endpoint.SystemID)),
uyuniLablelGroups: model.LabelValue(strings.Join(managedGroupNames, d.separator)),
uyuniLablelEndpointName: model.LabelValue(endpoint.EndpointName),
uyuniLablelExporter: model.LabelValue(endpoint.ExporterName),

View file

@ -280,17 +280,17 @@ func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
labels := model.LabelSet{}
labels[serversetPathLabel] = model.LabelValue(path)
labels[model.AddressLabel] = model.LabelValue(
net.JoinHostPort(member.ServiceEndpoint.Host, fmt.Sprintf("%d", member.ServiceEndpoint.Port)))
net.JoinHostPort(member.ServiceEndpoint.Host, strconv.Itoa(member.ServiceEndpoint.Port)))
labels[serversetEndpointLabelPrefix+"_host"] = model.LabelValue(member.ServiceEndpoint.Host)
labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.ServiceEndpoint.Port))
labels[serversetEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.ServiceEndpoint.Port))
for name, endpoint := range member.AdditionalEndpoints {
cleanName := model.LabelName(strutil.SanitizeLabelName(name))
labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(
endpoint.Host)
labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(
fmt.Sprintf("%d", endpoint.Port))
strconv.Itoa(endpoint.Port))
}
labels[serversetStatusLabel] = model.LabelValue(member.Status)
@ -321,10 +321,10 @@ func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
labels := model.LabelSet{}
labels[nervePathLabel] = model.LabelValue(path)
labels[model.AddressLabel] = model.LabelValue(
net.JoinHostPort(member.Host, fmt.Sprintf("%d", member.Port)))
net.JoinHostPort(member.Host, strconv.Itoa(member.Port)))
labels[nerveEndpointLabelPrefix+"_host"] = model.LabelValue(member.Host)
labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(fmt.Sprintf("%d", member.Port))
labels[nerveEndpointLabelPrefix+"_port"] = model.LabelValue(strconv.Itoa(member.Port))
labels[nerveEndpointLabelPrefix+"_name"] = model.LabelValue(member.Name)
return labels, nil

View file

@ -26,6 +26,7 @@ The Prometheus monitoring server
| <code class="text-nowrap">--web.enable-lifecycle</code> | Enable shutdown and reload via HTTP request. | `false` |
| <code class="text-nowrap">--web.enable-admin-api</code> | Enable API endpoints for admin control actions. | `false` |
| <code class="text-nowrap">--web.enable-remote-write-receiver</code> | Enable API endpoint accepting remote write requests. | `false` |
| <code class="text-nowrap">--web.remote-write-receiver.accepted-protobuf-messages</code> | List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: prometheus.WriteRequest, io.prometheus.write.v2.Request | `prometheus.WriteRequest` |
| <code class="text-nowrap">--web.console.templates</code> | Path to the console template directory, available at /consoles. | `consoles` |
| <code class="text-nowrap">--web.console.libraries</code> | Path to the console library directory. | `console_libraries` |
| <code class="text-nowrap">--web.page-title</code> | Document title of Prometheus instance. | `Prometheus Time Series Collection and Processing Server` |
@ -48,8 +49,9 @@ The Prometheus monitoring server
| <code class="text-nowrap">--rules.alert.for-outage-tolerance</code> | Max time to tolerate prometheus outage for restoring "for" state of alert. Use with server mode only. | `1h` |
| <code class="text-nowrap">--rules.alert.for-grace-period</code> | Minimum duration between alert and restored "for" state. This is maintained only for alerts with configured "for" time greater than grace period. Use with server mode only. | `10m` |
| <code class="text-nowrap">--rules.alert.resend-delay</code> | Minimum amount of time to wait before resending an alert to Alertmanager. Use with server mode only. | `1m` |
| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. Use with server mode only. | `4` |
| <code class="text-nowrap">--rules.max-concurrent-evals</code> | Global concurrency limit for independent rules that can run concurrently. When set, "query.max-concurrency" may need to be adjusted accordingly. Use with server mode only. | `4` |
| <code class="text-nowrap">--alertmanager.notification-queue-capacity</code> | The capacity of the queue for pending Alertmanager notifications. Use with server mode only. | `10000` |
| <code class="text-nowrap">--alertmanager.drain-notification-queue-on-shutdown</code> | Send any outstanding Alertmanager notifications when shutting down. If false, any outstanding Alertmanager notifications will be dropped when shutting down. Use with server mode only. | `true` |
| <code class="text-nowrap">--query.lookback-delta</code> | The maximum lookback duration for retrieving metrics during expression evaluations and federation. Use with server mode only. | `5m` |
| <code class="text-nowrap">--query.timeout</code> | Maximum time a query may take before being aborted. Use with server mode only. | `2m` |
| <code class="text-nowrap">--query.max-concurrency</code> | Maximum number of queries executed concurrently. Use with server mode only. | `20` |

View file

@ -566,6 +566,7 @@ Dump samples from a TSDB.
| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |
@ -584,7 +585,7 @@ Dump samples from a TSDB.
##### `promtool tsdb dump-openmetrics`
[Experimental] Dump samples from a TSDB into OpenMetrics format. Native histograms are not dumped.
[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.
@ -592,6 +593,7 @@ Dump samples from a TSDB.
| Flag | Description | Default |
| --- | --- | --- |
| <code class="text-nowrap">--sandbox-dir-root</code> | Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end. | `data/` |
| <code class="text-nowrap">--min-time</code> | Minimum timestamp to dump. | `-9223372036854775808` |
| <code class="text-nowrap">--max-time</code> | Maximum timestamp to dump. | `9223372036854775807` |
| <code class="text-nowrap">--match</code> | Series selector. Can be specified multiple times. | `{__name__=~'(?s:.*)'}` |

View file

@ -71,6 +71,10 @@ global:
# How frequently to evaluate rules.
[ evaluation_interval: <duration> | default = 1m ]
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past to ensure the underlying metrics have been received.
# Metric availability delays are more likely to occur when Prometheus is running as a remote write target, but can also occur when there are anomalies with scraping.
[ rule_query_offset: <duration> | default = 0s ]
# The labels to add to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
@ -117,6 +121,12 @@ global:
# that will be kept in memory. 0 means no limit.
[ keep_dropped_targets: <int> | default = 0 ]
runtime:
# Configure the Go garbage collector GOGC parameter
# See: https://tip.golang.org/doc/gc-guide#GOGC
# Lowering this number increases CPU usage.
[ gogc: <int> | default = 75 ]
# Rule files specifies a list of globs. Rules and alerts are read from
# all matching files.
rule_files:
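For illustration, a minimal sketch of how the `rule_query_offset` and `gogc` settings documented in this hunk might be combined in a `prometheus.yml`; the interval and GOGC values are assumptions, not recommendations:

```yaml
global:
  evaluation_interval: 1m
  # Evaluate rules against data as of 1m ago so late-arriving samples
  # (e.g. from remote write) are already present.
  rule_query_offset: 1m

runtime:
  # Lowering GOGC below its default of 75 shrinks the heap at the cost of more CPU.
  gogc: 50
```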
@ -931,6 +941,9 @@ tls_config:
# The host to use if the container is in host networking mode.
[ host_networking_host: <string> | default = "localhost" ]
# Match the first network if the container has multiple networks defined, thus avoiding collecting duplicate targets.
[ match_first_network: <boolean> | default = true ]
# Optional filters to limit the discovery process to a subset of available
# resources.
# The available filters are listed in the upstream documentation:
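As a hedged illustration of the `match_first_network` option above, a Docker SD scrape job might disable it like this; the job name and socket path are assumptions:

```yaml
scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        # Discover targets on every network the container is attached to,
        # instead of only the first one (the default shown above).
        match_first_network: false
```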
@ -1219,6 +1232,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_ec2_ipv6_addresses`: comma separated list of IPv6 addresses assigned to the instance's network interfaces, if present
* `__meta_ec2_owner_id`: the ID of the AWS account that owns the EC2 instance
* `__meta_ec2_platform`: the Operating System platform, set to 'windows' on Windows servers, absent otherwise
* `__meta_ec2_primary_ipv6_addresses`: comma separated list of the Primary IPv6 addresses of the instance, if present. The list is ordered based on the position of each corresponding network interface in the attachment order.
* `__meta_ec2_primary_subnet_id`: the subnet ID of the primary network interface, if available
* `__meta_ec2_private_dns_name`: the private DNS name of the instance, if available
* `__meta_ec2_private_ip`: the private IP address of the instance, if present
@ -1349,7 +1363,7 @@ interface.
The following meta labels are available on targets during [relabeling](#relabel_config):
* `__meta_openstack_address_pool`: the pool of the private IP.
* `__meta_openstack_instance_flavor`: the flavor of the OpenStack instance.
* `__meta_openstack_instance_flavor`: the flavor name of the OpenStack instance, or the flavor ID if the flavor name isn't available.
* `__meta_openstack_instance_id`: the OpenStack instance ID.
* `__meta_openstack_instance_image`: the ID of the image the OpenStack instance is using.
* `__meta_openstack_instance_name`: the OpenStack instance name.
@ -1357,7 +1371,7 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_openstack_private_ip`: the private IP of the OpenStack instance.
* `__meta_openstack_project_id`: the project (tenant) owning this instance.
* `__meta_openstack_public_ip`: the public IP of the OpenStack instance.
* `__meta_openstack_tag_<tagkey>`: each tag value of the instance.
* `__meta_openstack_tag_<key>`: each metadata item of the instance, with any unsupported characters converted to an underscore.
* `__meta_openstack_user_id`: the user account owning the tenant.
See below for the configuration options for OpenStack discovery:
@ -1467,6 +1481,7 @@ For OVHcloud's [public cloud instances](https://www.ovhcloud.com/en/public-cloud
* `__meta_ovhcloud_dedicated_server_ipv6`: the IPv6 of the server
* `__meta_ovhcloud_dedicated_server_link_speed`: the link speed of the server
* `__meta_ovhcloud_dedicated_server_name`: the name of the server
* `__meta_ovhcloud_dedicated_server_no_intervention`: whether datacenter intervention is disabled for the server
* `__meta_ovhcloud_dedicated_server_os`: the operating system of the server
* `__meta_ovhcloud_dedicated_server_rack`: the rack of the server
* `__meta_ovhcloud_dedicated_server_reverse`: the reverse DNS name of the server
@ -1597,7 +1612,16 @@ and serves as an interface to plug in custom service discovery mechanisms.
It reads a set of files containing a list of zero or more
`<static_config>`s. Changes to all defined files are detected via disk watches
and applied immediately. Files may be provided in YAML or JSON format. Only
and applied immediately.
While those individual files are watched for changes,
the parent directory is also watched implicitly. This is to handle [atomic
renaming](https://github.com/fsnotify/fsnotify/blob/c1467c02fba575afdb5f4201072ab8403bbf00f4/README.md?plain=1#L128) efficiently and to detect new files that match the configured globs.
This may cause issues if the parent directory contains a large number of other files,
as each of these files will be watched too, even though the events related
to them are not relevant.
Files may be provided in YAML or JSON format. Only
changes resulting in well-formed target groups are applied.
Files must contain a list of static configs, using these formats:
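A brief sketch of a scrape block relying on the file-based SD behaviour described above; the path and job name are assumptions:

```yaml
scrape_configs:
  - job_name: file-sd-example
    file_sd_configs:
      # Each matching file (and, implicitly, the parent directory) is watched,
      # so atomically renamed or newly created *.yml files are picked up.
      - files:
          - /etc/prometheus/targets/*.yml
        refresh_interval: 5m
```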
@ -2952,9 +2976,10 @@ The following meta labels are available on targets during [relabeling](#relabel_
* `__meta_scaleway_instance_type`: commercial type of the server
* `__meta_scaleway_instance_zone`: the zone of the server (ex: `fr-par-1`, complete list [here](https://developers.scaleway.com/en/products/instance/api/#introduction))
This role uses the private IPv4 address by default. This can be
This role uses the first address it finds in the following order: private IPv4, public IPv4, public IPv6. This can be
changed with relabeling, as demonstrated in [the Prometheus scaleway-sd
configuration file](/documentation/examples/prometheus-scaleway.yml).
Should an instance have no address before relabeling, it will not be added to the target list and you will not be able to relabel it.
#### Baremetal role
@ -3550,6 +3575,17 @@ this functionality.
# The URL of the endpoint to send samples to.
url: <string>
# protobuf message to use when writing to the remote write endpoint.
#
# * The `prometheus.WriteRequest` represents the message introduced in Remote Write 1.0, which
# will be deprecated eventually.
# * The `io.prometheus.write.v2.Request` was introduced in Remote Write 2.0 and replaces the former,
# by improving efficiency and sending metadata, created timestamp and native histograms by default.
#
# Before changing this value, consult with your remote storage provider (or test) what message it supports.
# Read more on https://prometheus.io/docs/specs/remote_write_spec_2_0/#io-prometheus-write-v2-request
[ protobuf_message: <prometheus.WriteRequest | io.prometheus.write.v2.Request> | default = prometheus.WriteRequest ]
# Timeout for requests to the remote write endpoint.
[ remote_timeout: <duration> | default = 30s ]
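A hedged example of opting into the 2.0 message described above for a single remote write endpoint; the URL is an assumption, and the receiver must support the message:

```yaml
remote_write:
  - url: "https://rw.example.com/api/v1/write"
    # Switch to the Remote Write 2.0 message only after confirming receiver support.
    protobuf_message: io.prometheus.write.v2.Request
```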
@ -3571,6 +3607,7 @@ write_relabel_configs:
[ send_exemplars: <boolean> | default = false ]
# Enables sending of native histograms, also known as sparse histograms, over remote write.
# For the `io.prometheus.write.v2.Request` message, this option is noop (always true).
[ send_native_histograms: <boolean> | default = false ]
# Sets the `Authorization` header on every remote write request with the
@ -3584,7 +3621,7 @@ basic_auth:
# Optional `Authorization` header configuration.
authorization:
# Sets the authentication type.
[ type: <string> | default: Bearer ]
[ type: <string> | default = Bearer ]
# Sets the credentials. It is mutually exclusive with
# `credentials_file`.
[ credentials: <secret> ]
@ -3648,7 +3685,7 @@ tls_config:
# contain port numbers.
[ no_proxy: <string> ]
# Use proxy URL indicated by environment variables (HTTP_PROXY, HTTPS_PROXY, and NO_PROXY, in upper- or lowercase)
[ proxy_from_environment: <boolean> | default: false ]
[ proxy_from_environment: <boolean> | default = false ]
# Specifies headers to send to proxies during CONNECT requests.
[ proxy_connect_header:
[ <string>: [<secret>, ...] ] ]
@ -3657,7 +3694,7 @@ tls_config:
[ follow_redirects: <boolean> | default = true ]
# Whether to enable HTTP2.
[ enable_http2: <boolean> | default: true ]
[ enable_http2: <boolean> | default = true ]
# Configures the queue used to write to remote storage.
queue_config:
@ -3672,7 +3709,8 @@ queue_config:
[ min_shards: <int> | default = 1 ]
# Maximum number of samples per send.
[ max_samples_per_send: <int> | default = 2000]
# Maximum time a sample will wait in buffer.
# Maximum time a sample will wait for a send. The sample might wait less
# if the buffer is full. Further time might pass due to potential retries.
[ batch_send_deadline: <duration> | default = 5s ]
# Initial retry delay. Gets doubled for every retry.
[ min_backoff: <duration> | default = 30ms ]
@ -3686,7 +3724,10 @@ queue_config:
# which means that all samples are sent.
[ sample_age_limit: <duration> | default = 0s ]
# Configures the sending of series metadata to remote storage.
# Configures the sending of series metadata to remote storage
# if the `prometheus.WriteRequest` message was chosen. When
# `io.prometheus.write.v2.Request` is used, metadata is always sent.
#
# Metadata configuration is subject to change at any point
# or be removed in future releases.
metadata_config:
@ -3800,6 +3841,10 @@ NOTE: Out-of-order ingestion is an experimental feature, but you do not need any
# into the TSDB, i.e. it is an in-order sample or an out-of-order/out-of-bounds sample
# that is within the out-of-order window, or (b) too-old, i.e. not in-order
# and before the out-of-order window.
#
# When out_of_order_time_window is greater than 0, it also affects the experimental agent mode. It allows
# the agent's WAL to accept out-of-order samples that fall within the specified time window relative
# to the timestamp of the last appended sample for the same series.
[ out_of_order_time_window: <duration> | default = 0s ]
```
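For reference, a minimal sketch of turning on the out-of-order window discussed above; the 30m value is an assumption:

```yaml
storage:
  tsdb:
    # Accept samples up to 30 minutes older than the newest sample of a series;
    # per the note above, this also applies to the experimental agent's WAL.
    out_of_order_time_window: 30m
```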

View file

@ -86,6 +86,9 @@ name: <string>
# rule can produce. 0 is no limit.
[ limit: <int> | default = 0 ]
# Offset the rule evaluation timestamp of this particular group by the specified duration into the past.
[ query_offset: <duration> | default = global.rule_query_offset ]
rules:
[ - <rule> ... ]
```
@ -148,6 +151,9 @@ the rule, active, pending, or inactive, are cleared as well. The event will be
recorded as an error in the evaluation, and as such no stale markers are
written.
# Rule query offset
This is useful to ensure the underlying metrics have been received and stored in Prometheus. Metric availability delays are more likely to occur when Prometheus is running as a remote write target due to the nature of distributed systems, but can also occur when there are anomalies with scraping and/or short evaluation intervals.
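A sketch of the per-group offset described above as it might appear in a rule file; the group name, interval, and recording rule are assumptions:

```yaml
groups:
  - name: example-recording-rules
    interval: 1m
    # Evaluate against data as of one minute ago so remote-written samples have arrived.
    query_offset: 1m
    rules:
      - record: job:http_requests:rate5m
        expr: sum by (job) (rate(http_requests_total[5m]))
```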
# Failed rule evaluations due to slow evaluation
If a rule group hasn't finished evaluating before its next evaluation is supposed to start (as defined by the `evaluation_interval`), the next evaluation will be skipped. Subsequent evaluations of the rule group will continue to be skipped until the initial evaluation either completes or times out. When this happens, there will be a gap in the metric produced by the recording rule. The `rule_group_iterations_missed_total` metric will be incremented for each missed iteration of the rule group.

View file

@ -230,3 +230,13 @@ The number of concurrent rule evaluations can be configured with `--rules.max-co
Enables the new experimental web UI instead of the old and stable web UI. The new UI is a complete rewrite and aims to be cleaner, less cluttered, and more modern under the hood. It is not feature complete yet and is still under active development.
`--enable-feature=new-ui`
## Metadata WAL Records
`--enable-feature=metadata-wal-records`
When enabled, Prometheus will store metadata in-memory and keep track of
metadata changes as WAL records on a per-series basis.
This must be used if you are also using remote write 2.0, as it will only gather metadata from the WAL.

View file

@ -31,11 +31,19 @@ production deployments it is highly recommended to use a
[named volume](https://docs.docker.com/storage/volumes/)
to ease managing the data on Prometheus upgrades.
To provide your own configuration, there are several options. Here are
two examples.
### Setting command line parameters
The Docker image is started with a number of default command line parameters, which
can be found in the [Dockerfile](https://github.com/prometheus/prometheus/blob/main/Dockerfile) (adjust the link to correspond with the version in use).
If you want to add extra command line parameters to the `docker run` command,
you will need to re-add these yourself as they will be overwritten.
### Volumes & bind-mount
To provide your own configuration, there are several options. Here are
two examples.
Bind-mount your `prometheus.yml` from the host by running:
```bash

View file

@ -25,8 +25,10 @@ Other non-`2xx` codes may be returned for errors occurring before the API
endpoint is reached.
An array of warnings may be returned if there are errors that do
not inhibit the request execution. All of the data that was successfully
collected will be returned in the data field.
not inhibit the request execution. An additional array of info-level
annotations may be returned for potential query issues that may or may
not be false positives. All of the data that was successfully collected
will be returned in the data field.
The JSON response envelope format is as follows:
@ -40,9 +42,11 @@ The JSON response envelope format is as follows:
"errorType": "<string>",
"error": "<string>",
// Only if there were warnings while executing the request.
// Only set if there were warnings while executing the request.
// There will still be data in the data field.
"warnings": ["<string>"]
"warnings": ["<string>"],
// Only set if there were info-level annotations while executing the request.
"infos": ["<string>"]
}
```
@ -473,6 +477,9 @@ Range vectors are returned as result type `matrix`. The corresponding
Each series could have the `"values"` key, or the `"histograms"` key, or both.
For a given timestamp, there will only be one sample of either float or histogram type.
Series are returned sorted by `metric`. Functions such as [`sort`](functions.md#sort)
and [`sort_by_label`](functions.md#sort_by_label) have no effect for range vectors.
### Instant vectors
Instant vectors are returned as result type `vector`. The corresponding
@ -491,6 +498,10 @@ Instant vectors are returned as result type `vector`. The corresponding
Each series could have the `"value"` key, or the `"histogram"` key, but not both.
Series are not guaranteed to be returned in any particular order unless a function
such as [`sort`](functions.md#sort) or [`sort_by_label`](functions.md#sort_by_label)
is used.
### Scalars
Scalar results are returned as result type `scalar`. The corresponding
@ -683,6 +694,7 @@ URL query parameters:
- `rule_group[]=<string>`: only return rules with the given rule group name. If the parameter is repeated, rules with any of the provided rule group names are returned. When the parameter is absent or empty, no filtering is done.
- `file[]=<string>`: only return rules with the given filepath. If the parameter is repeated, rules with any of the provided filepaths are returned. When the parameter is absent or empty, no filtering is done.
- `exclude_alerts=<bool>`: only return rules, do not return active alerts.
- `match[]=<label_selector>`: only return rules that have configured labels that satisfy the label selectors. If the parameter is repeated, rules that match any of the sets of label selectors are returned. Note that matching is on the labels in the definition of each rule, not on the values after template expansion (for alerting rules). Optional.
```json
$ curl http://localhost:9090/api/v1/rules

View file

@ -95,3 +95,13 @@ Assuming this metric contains one time series per running instance, you could
count the number of running instances per application like this:
count by (app) (instance_cpu_time_ns)
If we are exploring some metrics for their labels, e.g. to be able to aggregate
over some of them, we could use the following:
limitk(10, app_foo_metric_bar)
Alternatively, if we wanted the returned timeseries to be more evenly sampled,
we could use the following to get approximately 10% of them:
limit_ratio(0.1, app_foo_metric_bar)

View file

@ -79,7 +79,12 @@ labels of the 1-element output vector from the input vector.
## `ceil()`
`ceil(v instant-vector)` rounds the sample values of all elements in `v` up to
the nearest integer.
the nearest integer value greater than or equal to v.
* `ceil(+Inf) = +Inf`
* `ceil(±0) = ±0`
* `ceil(1.49) = 2.0`
* `ceil(1.78) = 2.0`
## `changes()`
@ -173,7 +178,12 @@ Special cases are:
## `floor()`
`floor(v instant-vector)` rounds the sample values of all elements in `v` down
to the nearest integer.
to the nearest integer value smaller than or equal to v.
* `floor(+Inf) = +Inf`
* `floor(±0) = ±0`
* `floor(1.49) = 1.0`
* `floor(1.78) = 1.0`
## `histogram_avg()`
@ -596,10 +606,14 @@ have exactly one element, `scalar` will return `NaN`.
`sort(v instant-vector)` returns vector elements sorted by their sample values,
in ascending order. Native histograms are sorted by their sum of observations.
Please note that `sort` only affects the results of instant queries, as range query results always have a fixed output ordering.
## `sort_desc()`
Same as `sort`, but sorts in descending order.
Like `sort`, `sort_desc` only affects the results of instant queries, as range query results always have a fixed output ordering.
## `sort_by_label()`
**This function has to be enabled via the [feature flag](../feature_flags/) `--enable-feature=promql-experimental-functions`.**

View file

@ -230,6 +230,8 @@ vector of fewer elements with aggregated values:
* `bottomk` (smallest k elements by sample value)
* `topk` (largest k elements by sample value)
* `quantile` (calculate φ-quantile (0 ≤ φ ≤ 1) over dimensions)
* `limitk` (sample n elements)
* `limit_ratio` (sample elements with approximately 𝑟 ratio if `𝑟 > 0`, and the complement of such samples if `𝑟 = -(1.0 - 𝑟)`)
These operators can either be used to aggregate over **all** label dimensions
or preserve distinct dimensions by including a `without` or `by` clause. These
@ -249,8 +251,8 @@ all other labels are preserved in the output. `by` does the opposite and drops
labels that are not listed in the `by` clause, even if their label values are
identical between all elements of the vector.
`parameter` is only required for `count_values`, `quantile`, `topk` and
`bottomk`.
`parameter` is only required for `count_values`, `quantile`, `topk`,
`bottomk`, `limitk` and `limit_ratio`.
`count_values` outputs one time series per unique sample value. Each series has
an additional label. The name of that label is given by the aggregation
@ -261,11 +263,16 @@ time series is the number of times that sample value was present.
the input samples, including the original labels, are returned in the result
vector. `by` and `without` are only used to bucket the input vector.
`limitk` and `limit_ratio` also return a subset of the input samples,
including the original labels in the result vector. These are experimental
operators that must be enabled with `--enable-feature=promql-experimental-functions`.
`quantile` calculates the φ-quantile, the value that ranks at number φ*N among
the N metric values of the dimensions aggregated over. φ is provided as the
aggregation parameter. For example, `quantile(0.5, ...)` calculates the median,
`quantile(0.95, ...)` the 95th percentile. For φ = `NaN`, `NaN` is returned. For φ < 0, `-Inf` is returned. For φ > 1, `+Inf` is returned.
Example:
If the metric `http_requests_total` had time series that fan out by
@ -291,6 +298,33 @@ To get the 5 largest HTTP requests counts across all instances we could write:
topk(5, http_requests_total)
To sample 10 timeseries, for example to inspect labels and their values, we
could write:
limitk(10, http_requests_total)
To deterministically sample approximately 10% of timeseries we could write:
limit_ratio(0.1, http_requests_total)
Given that `limit_ratio()` implements a deterministic sampling algorithm (based
on labels' hash), you can get the _complement_ of the above samples, i.e.
approximately 90%, but precisely those not returned by `limit_ratio(0.1, ...)`
with:
limit_ratio(-0.9, http_requests_total)
You can also use this feature to verify, for example, that `avg()` is a representative
aggregation for your samples' values, by checking that the difference between
the averages of two sampled subsets is "small" when compared to the standard
deviation.
abs(
avg(limit_ratio(0.5, http_requests_total))
-
avg(limit_ratio(-0.5, http_requests_total))
) <= bool stddev(http_requests_total)
## Binary operator precedence
The following list shows the precedence of binary operators in Prometheus, from

View file

@ -5,63 +5,7 @@ sort_rank: 7
# Remote Read API
This is not currently considered part of the stable API and is subject to change
even between non-major version releases of Prometheus.
## Format overview
The API response format is JSON. Every successful API request returns a `2xx`
status code.
Invalid requests that reach the API handlers return a JSON error object
and one of the following HTTP response codes:
- `400 Bad Request` when parameters are missing or incorrect.
- `422 Unprocessable Entity` when an expression can't be executed
([RFC4918](https://tools.ietf.org/html/rfc4918#page-78)).
- `503 Service Unavailable` when queries time out or abort.
Other non-`2xx` codes may be returned for errors occurring before the API
endpoint is reached.
An array of warnings may be returned if there are errors that do
not inhibit the request execution. All of the data that was successfully
collected will be returned in the data field.
The JSON response envelope format is as follows:
```
{
"status": "success" | "error",
"data": <data>,
// Only set if status is "error". The data field may still hold
// additional data.
"errorType": "<string>",
"error": "<string>",
// Only if there were warnings while executing the request.
// There will still be data in the data field.
"warnings": ["<string>"]
}
```
Generic placeholders are defined as follows:
* `<rfc3339 | unix_timestamp>`: Input timestamps may be provided either in
[RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format or as a Unix timestamp
in seconds, with optional decimal places for sub-second precision. Output
timestamps are always represented as Unix timestamps in seconds.
* `<series_selector>`: Prometheus [time series
selectors](basics.md#time-series-selectors) like `http_requests_total` or
`http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
* `<duration>`: [Prometheus duration strings](basics.md#time_durations).
For example, `5m` refers to a duration of 5 minutes.
* `<bool>`: boolean values (strings `true` and `false`).
Note: Names of query parameters that may be repeated end with `[]`.
## Remote Read API
> This is not currently considered part of the stable API and is subject to change even between non-major version releases of Prometheus.
This API provides data read functionality from Prometheus. This interface expects [snappy](https://github.com/google/snappy) compression.
The API definition is located [here](https://github.com/prometheus/prometheus/blob/master/prompb/remote.proto).
@ -79,5 +23,3 @@ This returns a message that includes a list of raw samples.
These streamed chunks utilize an XOR algorithm inspired by the [Gorilla](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf)
compression to encode the chunks. However, it provides resolution to the millisecond instead of to the second.

View file

@ -61,8 +61,11 @@ A Prometheus server's data directory looks something like this:
Note that a limitation of local storage is that it is not clustered or
replicated. Thus, it is not arbitrarily scalable or durable in the face of
drive or node outages and should be managed like any other single node
database. The use of RAID is suggested for storage availability, and
[snapshots](querying/api.md#snapshot) are recommended for backups. With proper
database.
[Snapshots](querying/api.md#snapshot) are recommended for backups. Backups
made without snapshots run the risk of losing data that was recorded since
the last WAL sync, which typically happens every two hours. With proper
architecture, it is possible to retain years of data in local storage.
Alternatively, external storage may be used via the
@ -84,8 +87,10 @@ or 31 days, whichever is smaller.
Prometheus has several flags that configure local storage. The most important are:
- `--storage.tsdb.path`: Where Prometheus writes its database. Defaults to `data/`.
- `--storage.tsdb.retention.time`: When to remove old data. Defaults to `15d`.
Overrides `storage.tsdb.retention` if this flag is set to anything other than default.
- `--storage.tsdb.retention.time`: How long to retain samples in storage. When this flag is
set, it overrides `storage.tsdb.retention`. If neither this flag nor `storage.tsdb.retention`
nor `storage.tsdb.retention.size` is set, the retention time defaults to `15d`.
Supported units: y, w, d, h, m, s, ms.
- `--storage.tsdb.retention.size`: The maximum number of bytes of storage blocks to retain.
The oldest data will be removed first. Defaults to `0` or disabled. Units supported:
B, KB, MB, GB, TB, PB, EB. Ex: "512MB". Based on powers-of-2, so 1KB is 1024B. Only
@ -195,6 +200,9 @@ or time-series database to Prometheus. To do so, the user must first convert the
source data into [OpenMetrics](https://openmetrics.io/) format, which is the
input format for the backfilling as described below.
Note that native histograms and staleness markers are not supported by this
procedure, as they cannot be represented in the OpenMetrics format.
### Usage
Backfilling can be used via the Promtool command line. Promtool will write the blocks

View file

@ -127,9 +127,9 @@ func (d *discovery) parseServiceNodes(resp *http.Response, name string) (*target
// since the service may be registered remotely through a different node.
var addr string
if node.ServiceAddress != "" {
addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort))
addr = net.JoinHostPort(node.ServiceAddress, strconv.Itoa(node.ServicePort))
} else {
addr = net.JoinHostPort(node.Address, fmt.Sprintf("%d", node.ServicePort))
addr = net.JoinHostPort(node.Address, strconv.Itoa(node.ServicePort))
}
target := model.LabelSet{model.AddressLabel: model.LabelValue(addr)}

View file

@ -7,6 +7,7 @@ To use it:
```
go build
./example_write_adapter
```
@ -15,10 +16,19 @@ go build
```yaml
remote_write:
- url: "http://localhost:1234/receive"
protobuf_message: "io.prometheus.write.v2.Request"
```
Then start Prometheus:
or, for the deprecated Remote Write 1.0 message:
```yaml
remote_write:
- url: "http://localhost:1234/receive"
protobuf_message: "prometheus.WriteRequest"
```
Then start Prometheus (in a separate terminal):
```
./prometheus
./prometheus --enable-feature=metadata-wal-records
```

View file

@ -18,44 +18,103 @@ import (
"log"
"net/http"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/prompb"
writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2"
"github.com/prometheus/prometheus/storage/remote"
)
func main() {
http.HandleFunc("/receive", func(w http.ResponseWriter, r *http.Request) {
enc := r.Header.Get("Content-Encoding")
if enc == "" {
http.Error(w, "missing Content-Encoding header", http.StatusUnsupportedMediaType)
return
}
if enc != "snappy" {
http.Error(w, "unknown encoding, only snappy supported", http.StatusUnsupportedMediaType)
return
}
contentType := r.Header.Get("Content-Type")
if contentType == "" {
http.Error(w, "missing Content-Type header", http.StatusUnsupportedMediaType)
return
}
defer func() { _ = r.Body.Close() }()
// Very simplistic content parsing, see
// storage/remote/write_handler.go#WriteHandler.ServeHTTP for production example.
switch contentType {
case "application/x-protobuf", "application/x-protobuf;proto=prometheus.WriteRequest":
req, err := remote.DecodeWriteRequest(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
for _, ts := range req.Timeseries {
m := make(model.Metric, len(ts.Labels))
for _, l := range ts.Labels {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
printV1(req)
case "application/x-protobuf;proto=io.prometheus.write.v2.Request":
req, err := remote.DecodeWriteV2Request(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
fmt.Println(m)
printV2(req)
default:
msg := fmt.Sprintf("Unknown remote write content type: %s", contentType)
fmt.Println(msg)
http.Error(w, msg, http.StatusBadRequest)
}
})
log.Fatal(http.ListenAndServe(":1234", nil))
}
func printV1(req *prompb.WriteRequest) {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
fmt.Println(ts.ToLabels(&b, nil))
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, e := range ts.Exemplars {
m := make(model.Metric, len(e.Labels))
for _, l := range e.Labels {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, nil)
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
fmt.Printf("\tExemplar: %+v %f %d\n", m, e.Value, e.Timestamp)
}
for _, hp := range ts.Histograms {
h := remote.HistogramProtoToHistogram(hp)
if hp.IsFloatHistogram() {
h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
continue
}
h := hp.ToIntHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
}
func printV2(req *writev2.Request) {
b := labels.NewScratchBuilder(0)
for _, ts := range req.Timeseries {
l := ts.ToLabels(&b, req.Symbols)
m := ts.ToMetadata(req.Symbols)
fmt.Println(l, m)
for _, s := range ts.Samples {
fmt.Printf("\tSample: %f %d\n", s.Value, s.Timestamp)
}
for _, ep := range ts.Exemplars {
e := ep.ToExemplar(&b, req.Symbols)
fmt.Printf("\tExemplar: %+v %f %d\n", e.Labels, e.Value, ep.Timestamp)
}
for _, hp := range ts.Histograms {
if hp.IsFloatHistogram() {
h := hp.ToFloatHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
continue
}
h := hp.ToIntHistogram()
fmt.Printf("\tHistogram: %s\n", h.String())
}
}
})
log.Fatal(http.ListenAndServe(":1234", nil))
}

View file

@ -8,70 +8,67 @@ require (
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.5
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/common v0.53.0
github.com/prometheus/prometheus v0.51.1
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/common v0.55.0
github.com/prometheus/prometheus v0.52.1
github.com/stretchr/testify v1.9.0
)
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.50.32 // indirect
github.com/aws/aws-sdk-go v1.53.16 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/klauspost/compress v1.17.8 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.opentelemetry.io/collector/featuregate v1.3.0 // indirect
go.opentelemetry.io/collector/pdata v1.3.0 // indirect
go.opentelemetry.io/collector/semconv v0.96.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/collector/pdata v1.8.0 // indirect
go.opentelemetry.io/collector/semconv v0.101.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
go.opentelemetry.io/otel v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
golang.org/x/net v0.22.0 // indirect
golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
google.golang.org/grpc v1.62.1 // indirect
google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apimachinery v0.29.2 // indirect
k8s.io/client-go v0.29.2 // indirect
k8s.io/apimachinery v0.29.3 // indirect
k8s.io/client-go v0.29.3 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
)
@ -83,4 +80,10 @@ exclude (
cloud.google.com/go v0.34.0
cloud.google.com/go v0.65.0
cloud.google.com/go v0.82.0
// Fixing ambiguous import: found package google.golang.org/genproto/googleapis/api/annotations in multiple modules.
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
)
// TODO(bwplotka): Move to main branch commit or perhaps released version.
replace github.com/prometheus/prometheus => github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c

View file

@ -1,17 +1,17 @@
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g=
github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
@ -26,8 +26,8 @@ github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8V
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY=
github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -35,10 +35,10 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -46,14 +46,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU=
github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@ -68,8 +68,8 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@ -95,16 +95,16 @@ github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdX
github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -115,10 +115,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@ -137,40 +135,38 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA=
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928=
github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA=
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/influxdata/influxdb v1.11.5 h1:+em5VOl6lhAZubXj5o6SobCwvrRs3XDlBx/MUI4schI=
@ -194,8 +190,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -210,21 +206,23 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4=
github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk=
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -243,8 +241,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -258,33 +256,33 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.51.1 h1:V2e7x2oiUC0Megp26+xjffxBf9EGkyP1iQuGd4VjUSU=
github.com/prometheus/prometheus v0.51.1/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c h1:6GEA48LnonkYZhQ654v7QTIP5uBTbCEVm49oIhif5lc=
github.com/prometheus/prometheus v0.53.1-0.20240704074759-c137febfcf8c/go.mod h1:FcNs5wa7M9yV8IlxlB/05s5oy9vULUIlu/tZsviRIT8=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@ -306,21 +304,18 @@ github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/collector/featuregate v1.3.0 h1:nrFSx+zfjdisjE9oCx25Aep3nJ9RaUjeE1qFL6eovoU=
go.opentelemetry.io/collector/featuregate v1.3.0/go.mod h1:mm8+xyQfgDmqhyegZRNIQmoKsNnDTwWKFLsdMoXAb7A=
go.opentelemetry.io/collector/pdata v1.3.0 h1:JRYN7tVHYFwmtQhIYbxWeiKSa2L1nCohyAs8sYqKFZo=
go.opentelemetry.io/collector/pdata v1.3.0/go.mod h1:t7W0Undtes53HODPdSujPLTnfSR5fzT+WpL+RTaaayo=
go.opentelemetry.io/collector/semconv v0.96.0 h1:DrZy8BpzJDnN2zFxXRj6BhfGYxNlqpFHBqyuS9fVHRY=
go.opentelemetry.io/collector/semconv v0.96.0/go.mod h1:zOm/U3pgMIWcvrcnPbR9Xx2HinoXj46ERMK8PUV9wrs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -331,16 +326,14 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -353,24 +346,21 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -385,48 +375,38 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -434,9 +414,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -457,16 +436,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=


@ -34,6 +34,20 @@
description: 'Prometheus %(prometheusName)s has failed to refresh SD with mechanism {{$labels.mechanism}}.' % $._config,
},
},
{
alert: 'PrometheusKubernetesListWatchFailures',
expr: |||
increase(prometheus_sd_kubernetes_failures_total{%(prometheusSelector)s}[5m]) > 0
||| % $._config,
'for': '15m',
labels: {
severity: 'warning',
},
annotations: {
summary: 'Requests in Kubernetes SD are failing.',
description: 'Kubernetes service discovery of Prometheus %(prometheusName)s is experiencing {{ printf "%%.0f" $value }} failures with LIST/WATCH requests to the Kubernetes API in the last 5 minutes.' % $._config,
},
},
{
alert: 'PrometheusNotificationQueueRunningFull',
expr: |||


@ -44,5 +44,10 @@
// The default refresh time for all dashboards, default to 60s
refresh: '60s',
},
// Opt-out of multi-cluster dashboards by overriding this.
showMultiCluster: true,
// The cluster label to infer the cluster name from.
clusterLabel: 'cluster',
},
}
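The two _config fields added above (showMultiCluster and clusterLabel) are meant to be overridden by downstream consumers of the mixin. A minimal consumer sketch (field names come from the hunk above; the mixin.libsonnet entry point and the rendered dashboard name are assumptions) might opt out of the multi-cluster dashboard variants like this:

// Rough sketch, not part of this commit: render the overview dashboard
// with multi-cluster templating disabled. Only showMultiCluster and
// clusterLabel come from the config change above; the entry point path
// is an assumption.
local prometheusMixin = (import 'mixin.libsonnet') + {
  _config+:: {
    showMultiCluster: false,  // drop the cluster template variable and selectors
    clusterLabel: 'cluster',  // only consulted while showMultiCluster is true
  },
};

// Evaluates to the Grafana JSON for the single-cluster overview dashboard.
prometheusMixin.grafanaDashboards['prometheus.json']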


@ -10,21 +10,32 @@ local template = grafana.template;
{
grafanaDashboards+:: {
'prometheus.json':
g.dashboard(
local showMultiCluster = $._config.showMultiCluster;
local dashboard = g.dashboard(
'%(prefix)sOverview' % $._config.grafanaPrometheus
)
.addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'cluster')
);
local templatedDashboard = if showMultiCluster then
dashboard
.addMultiTemplate('cluster', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, $._config.clusterLabel)
.addMultiTemplate('job', 'prometheus_build_info{cluster=~"$cluster"}', 'job')
.addMultiTemplate('instance', 'prometheus_build_info{cluster=~"$cluster", job=~"$job"}', 'instance')
else
dashboard
.addMultiTemplate('job', 'prometheus_build_info{%(prometheusSelector)s}' % $._config, 'job')
.addMultiTemplate('instance', 'prometheus_build_info{job=~"$job"}', 'instance');
templatedDashboard
.addRow(
g.row('Prometheus Stats')
.addPanel(
g.panel('Prometheus Stats') +
g.tablePanel([
g.tablePanel(if showMultiCluster then [
'count by (cluster, job, instance, version) (prometheus_build_info{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
'max by (cluster, job, instance) (time() - process_start_time_seconds{cluster=~"$cluster", job=~"$job", instance=~"$instance"})',
] else [
'count by (job, instance, version) (prometheus_build_info{job=~"$job", instance=~"$instance"})',
'max by (job, instance) (time() - process_start_time_seconds{job=~"$job", instance=~"$instance"})',
], {
cluster: { alias: 'Cluster' },
cluster: { alias: if showMultiCluster then 'Cluster' else '' },
job: { alias: 'Job' },
instance: { alias: 'Instance' },
version: { alias: 'Version' },
@ -37,12 +48,18 @@ local template = grafana.template;
g.row('Discovery')
.addPanel(
g.panel('Target Sync') +
g.queryPanel('sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3', '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}') +
g.queryPanel(if showMultiCluster then 'sum(rate(prometheus_target_sync_length_seconds_sum{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[5m])) by (cluster, job, scrape_job, instance) * 1e3'
else 'sum(rate(prometheus_target_sync_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m])) by (scrape_job) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}:{{scrape_job}}'
else '{{scrape_job}}') +
{ yaxes: g.yaxes('ms') }
)
.addPanel(
g.panel('Targets') +
g.queryPanel('sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})', '{{cluster}}:{{job}}:{{instance}}') +
g.queryPanel(if showMultiCluster then 'sum by (cluster, job, instance) (prometheus_sd_discovered_targets{cluster=~"$cluster", job=~"$job",instance=~"$instance"})'
else 'sum(prometheus_sd_discovered_targets{job=~"$job",instance=~"$instance"})',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}}'
else 'Targets') +
g.stack
)
)
@ -50,29 +67,47 @@ local template = grafana.template;
g.row('Retrieval')
.addPanel(
g.panel('Average Scrape Interval Duration') +
g.queryPanel('rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3', '{{cluster}}:{{job}}:{{instance}} {{interval}} configured') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_target_interval_length_seconds_sum{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m]) * 1e3'
else 'rate(prometheus_target_interval_length_seconds_sum{job=~"$job",instance=~"$instance"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~"$job",instance=~"$instance"}[5m]) * 1e3',
if showMultiCluster then '{{cluster}}:{{job}}:{{instance}} {{interval}} configured'
else '{{interval}} configured') +
{ yaxes: g.yaxes('ms') }
)
.addPanel(
g.panel('Scrape failures') +
g.queryPanel([
g.queryPanel(if showMultiCluster then [
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_exceeded_sample_limit_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_bounds_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
'sum by (cluster, job, instance) (rate(prometheus_target_scrapes_sample_out_of_order_total{cluster=~"$cluster",job=~"$job",instance=~"$instance"}[1m]))',
], [
] else [
'sum by (job) (rate(prometheus_target_scrapes_exceeded_body_size_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))',
'sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))',
], if showMultiCluster then [
'exceeded body size limit: {{cluster}} {{job}} {{instance}}',
'exceeded sample limit: {{cluster}} {{job}} {{instance}}',
'duplicate timestamp: {{cluster}} {{job}} {{instance}}',
'out of bounds: {{cluster}} {{job}} {{instance}}',
'out of order: {{cluster}} {{job}} {{instance}}',
] else [
'exceeded body size limit: {{job}}',
'exceeded sample limit: {{job}}',
'duplicate timestamp: {{job}}',
'out of bounds: {{job}}',
'out of order: {{job}}',
]) +
g.stack
)
.addPanel(
g.panel('Appended Samples') +
g.queryPanel('rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])', '{{cluster}} {{job}} {{instance}}') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_tsdb_head_samples_appended_total{cluster=~"$cluster", job=~"$job",instance=~"$instance"}[5m])'
else 'rate(prometheus_tsdb_head_samples_appended_total{job=~"$job",instance=~"$instance"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack
)
)
@ -80,12 +115,18 @@ local template = grafana.template;
g.row('Storage')
.addPanel(
g.panel('Head Series') +
g.queryPanel('prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head series') +
g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_series{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_series{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head series'
else '{{job}} {{instance}} head series') +
g.stack
)
.addPanel(
g.panel('Head Chunks') +
g.queryPanel('prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}', '{{cluster}} {{job}} {{instance}} head chunks') +
g.queryPanel(if showMultiCluster then 'prometheus_tsdb_head_chunks{cluster=~"$cluster",job=~"$job",instance=~"$instance"}'
else 'prometheus_tsdb_head_chunks{job=~"$job",instance=~"$instance"}',
if showMultiCluster then '{{cluster}} {{job}} {{instance}} head chunks'
else '{{job}} {{instance}} head chunks') +
g.stack
)
)
@ -93,12 +134,18 @@ local template = grafana.template;
g.row('Query')
.addPanel(
g.panel('Query Rate') +
g.queryPanel('rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])', '{{cluster}} {{job}} {{instance}}') +
g.queryPanel(if showMultiCluster then 'rate(prometheus_engine_query_duration_seconds_count{cluster=~"$cluster",job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])'
else 'rate(prometheus_engine_query_duration_seconds_count{job=~"$job",instance=~"$instance",slice="inner_eval"}[5m])',
if showMultiCluster then '{{cluster}} {{job}} {{instance}}'
else '{{job}} {{instance}}') +
g.stack,
)
.addPanel(
g.panel('Stage Duration') +
g.queryPanel('max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3', '{{slice}}') +
g.queryPanel(if showMultiCluster then 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",cluster=~"$cluster", job=~"$job",instance=~"$instance"}) * 1e3'
else 'max by (slice) (prometheus_engine_query_duration_seconds{quantile="0.9",job=~"$job",instance=~"$instance"}) * 1e3',
if showMultiCluster then '{{slice}}'
else '{{slice}}') +
{ yaxes: g.yaxes('ms') } +
g.stack,
)

go.mod

@ -3,20 +3,20 @@ module github.com/prometheus/prometheus
go 1.21
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0
github.com/Code-Hex/go-generics-cache v1.3.1
github.com/KimMachineGun/automemlimit v0.5.0
github.com/Code-Hex/go-generics-cache v1.5.1
github.com/KimMachineGun/automemlimit v0.6.1
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
github.com/aws/aws-sdk-go v1.50.32
github.com/aws/aws-sdk-go v1.53.16
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
github.com/cespare/xxhash/v2 v2.2.0
github.com/cespare/xxhash/v2 v2.3.0
github.com/dennwc/varint v1.0.0
github.com/digitalocean/godo v1.109.0
github.com/docker/docker v25.0.3+incompatible
github.com/digitalocean/godo v1.117.0
github.com/docker/docker v26.1.3+incompatible
github.com/edsrzf/mmap-go v1.1.0
github.com/envoyproxy/go-control-plane v0.12.0
github.com/envoyproxy/protoc-gen-validate v1.0.4
@ -24,66 +24,66 @@ require (
github.com/fsnotify/fsnotify v1.7.0
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.6.0
github.com/go-openapi/strfmt v0.22.2
github.com/go-openapi/strfmt v0.23.0
github.com/go-zookeeper/zk v1.0.3
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.6.0
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba
github.com/google/uuid v1.6.0
github.com/gophercloud/gophercloud v1.8.0
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd
github.com/gophercloud/gophercloud v1.12.0
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/consul/api v1.28.2
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702
github.com/hetznercloud/hcloud-go/v2 v2.6.0
github.com/hashicorp/consul/api v1.29.1
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d
github.com/hetznercloud/hcloud-go/v2 v2.9.0
github.com/ionos-cloud/sdk-go/v6 v6.1.11
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.7
github.com/klauspost/compress v1.17.8
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b
github.com/linode/linodego v1.30.0
github.com/miekg/dns v1.1.58
github.com/linode/linodego v1.35.0
github.com/miekg/dns v1.1.59
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1
github.com/oklog/run v1.1.0
github.com/oklog/ulid v1.3.1
github.com/ovh/go-ovh v1.4.3
github.com/ovh/go-ovh v1.5.1
github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/client_model v0.6.0
github.com/prometheus/common v0.49.1-0.20240306132007-4199f18c3e92
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.54.0
github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.11.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0
github.com/vultr/govultr/v2 v2.17.2
go.opentelemetry.io/collector/featuregate v1.4.0
go.opentelemetry.io/collector/pdata v1.4.0
go.opentelemetry.io/collector/semconv v0.97.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
go.opentelemetry.io/otel v1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0
go.opentelemetry.io/otel/sdk v1.24.0
go.opentelemetry.io/otel/trace v1.24.0
go.opentelemetry.io/collector/pdata v1.8.0
go.opentelemetry.io/collector/semconv v0.101.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
go.opentelemetry.io/otel v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
go.opentelemetry.io/otel/sdk v1.27.0
go.opentelemetry.io/otel/trace v1.27.0
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.5.3
go.uber.org/goleak v1.3.0
go.uber.org/multierr v1.11.0
golang.org/x/net v0.22.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.18.0
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.21.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
golang.org/x/tools v0.19.0
google.golang.org/api v0.168.0
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.33.0
golang.org/x/tools v0.22.0
google.golang.org/api v0.183.0
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157
google.golang.org/grpc v1.64.0
google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.29.3
@ -94,17 +94,18 @@ require (
)
require (
cloud.google.com/go/compute v1.23.4 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
cloud.google.com/go/auth v0.5.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cilium/ebpf v0.11.0 // indirect
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect
github.com/containerd/cgroups/v3 v3.0.3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
@ -114,23 +115,23 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.22.2 // indirect
github.com/go-openapi/errors v0.21.1 // indirect
github.com/go-openapi/errors v0.22.0 // indirect
github.com/go-openapi/jsonpointer v0.20.2 // indirect
github.com/go-openapi/jsonreference v0.20.4 // indirect
github.com/go-openapi/loads v0.21.5 // indirect
github.com/go-openapi/spec v0.20.14 // indirect
github.com/go-openapi/swag v0.22.9 // indirect
github.com/go-openapi/validate v0.23.0 // indirect
github.com/go-resty/resty/v2 v2.11.0 // indirect
github.com/go-resty/resty/v2 v2.13.1 // indirect
github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/glog v1.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
@ -139,18 +140,17 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.4 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/cronexpr v1.1.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.5.0 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
@ -161,9 +161,10 @@ require (
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@ -182,19 +183,17 @@ require (
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
golang.org/x/crypto v0.21.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/term v0.21.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gotest.tools/v3 v3.0.3 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect

go.sum

@ -12,16 +12,18 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@ -34,14 +36,14 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0 h1:ui3YNbxfW7J3tTFIZMH6LIGRjCngp+J+nIFlnizfNTE=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.6.0/go.mod h1:gZmgV+qBqygoznvqo2J9oKZAFziqhLZ2xE/WVUmzkHA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0 h1:sUFnFjzDUie80h24I7mrKtwCKgLY9L8h5Tp2x9+TWqk=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.6.0/go.mod h1:52JbnQTp15qg5mRkMBHwp0j0ZFwHJ42Sx3zVV5RE9p0=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
@ -50,15 +52,15 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g=
github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KimMachineGun/automemlimit v0.5.0 h1:BeOe+BbJc8L5chL3OwzVYjVzyvPALdd5wxVVOWuUZmQ=
github.com/KimMachineGun/automemlimit v0.5.0/go.mod h1:di3GCKiu9Y+1fs92erCbUvKzPkNyViN3mA0vti/ykEQ=
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
@ -90,8 +92,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:W
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY=
github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go v1.53.16 h1:8oZjKQO/ml1WLUZw5hvF7pvYjPf8o9f57Wldoy/q9Qc=
github.com/aws/aws-sdk-go v1.53.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
@ -102,12 +104,12 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@ -118,8 +120,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc=
github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
@ -141,14 +143,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU=
github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs=
github.com/digitalocean/godo v1.117.0 h1:WVlTe09melDYTd7VCVyvHcNWbgB+uI1O115+5LOtdSw=
github.com/digitalocean/godo v1.117.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@ -178,8 +180,8 @@ github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
@ -214,8 +216,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0=
github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo=
github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI=
github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8=
github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
@ -224,14 +226,14 @@ github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRz
github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8=
github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do=
github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw=
github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw=
github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw=
github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
@ -246,8 +248,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
@ -278,8 +280,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@ -319,8 +319,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@ -332,10 +332,10 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/gophercloud/gophercloud v1.12.0 h1:Jrz16vPAL93l80q16fp8NplrTCp93y7rZh2P3Q4Yq7g=
github.com/gophercloud/gophercloud v1.12.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -343,21 +343,23 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8=
github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE=
github.com/hashicorp/consul/api v1.29.1 h1:UEwOjYJrd3lG1x5w7HxDRMGiAUPrb3f103EoeKuuEcc=
github.com/hashicorp/consul/api v1.29.1/go.mod h1:lumfRkY/coLuqMICkI7Fh3ylMG31mQSRZyef2c5YvJI=
github.com/hashicorp/consul/proto-public v0.6.1 h1:+uzH3olCrksXYWAYHKqK782CtK9scfqH+Unlw3UHhCg=
github.com/hashicorp/consul/proto-public v0.6.1/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8=
github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -367,9 +369,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@ -381,8 +382,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@ -408,13 +409,13 @@ github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA=
github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d h1:KHq+mAzWSkumj4PDoXc5VZbycPGcmYu8tohgVLQ6SIc=
github.com/hashicorp/nomad/api v0.0.0-20240604134157-e73d8bb1140d/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928=
github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA=
github.com/hetznercloud/hcloud-go/v2 v2.9.0 h1:s0N6R7Zoi2DPfMtUF5o9VeUBzTtHVY6MIkHOQnfu/AY=
github.com/hetznercloud/hcloud-go/v2 v2.9.0/go.mod h1:qtW/TuU7Bs16ibXl/ktJarWqU2LwHr7eGlwoilHxtgg=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@ -453,8 +454,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -471,8 +472,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.30.0 h1:6HJli+LX7NGu+Sne2G+ux790EkVOWOV/SR4mK3jcs6k=
github.com/linode/linodego v1.30.0/go.mod h1:/46h/XpmWi//oSA92GX2p3FIxb8HbX7grslPPQalR2o=
github.com/linode/linodego v1.35.0 h1:rIhUeCHBLEDlkoRnOTwzSGzljQ3ksXwLxacmXnrV+Do=
github.com/linode/linodego v1.35.0/go.mod h1:JxuhOEAMfSxun6RU5/MgTKH2GGTmFrhKRj3wL1NFin0=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@ -490,8 +491,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
@ -499,8 +500,8 @@ github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwU
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -515,6 +516,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -570,8 +573,8 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY=
github.com/ovh/go-ovh v1.5.1 h1:P8O+7H+NQuFK9P/j4sFW5C0fvSS2DnHYGPwdVCp45wI=
github.com/ovh/go-ovh v1.5.1/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
@ -605,16 +608,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
@ -622,8 +625,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.49.1-0.20240306132007-4199f18c3e92 h1:nuwTDY/15McImfuXcUD6AA3alpUNEXfWws8K/8SXr68=
github.com/prometheus/common v0.49.1-0.20240306132007-4199f18c3e92/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE=
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
@ -642,13 +645,13 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27 h1:yGAraK1uUjlhSXgNMIy8o/J4LFNcy7yeipBqt9N9mVg=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.27/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
@ -720,30 +723,28 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/featuregate v1.4.0 h1:RWE9M659C9iuUQc4GzBsndkGHG1jIzIY+nZJWvcKy1M=
go.opentelemetry.io/collector/featuregate v1.4.0/go.mod h1:w7nUODKxEi3FLf1HslCiE6YWtMtOOrMnSwsDam8Mg9w=
go.opentelemetry.io/collector/pdata v1.4.0 h1:cA6Pr7Z2V7mE+i7FmYpavX7nefzd6H4CICgW0T9aJX0=
go.opentelemetry.io/collector/pdata v1.4.0/go.mod h1:0Ttp4wQinhV5oJTd9MjyvUegmZBO9O0nrlh/+EDLw+Q=
go.opentelemetry.io/collector/semconv v0.97.0 h1:iF3nTfThbiOwz7o5Pocn0dDnDoffd18ijDuf6Mwzi1s=
go.opentelemetry.io/collector/semconv v0.97.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.opentelemetry.io/collector/pdata v1.8.0 h1:d/QQgZxB4Y+d3mqLVh2ozvzujUhloD3P/fk7X+In764=
go.opentelemetry.io/collector/pdata v1.8.0/go.mod h1:/W7clu0wFC4WSRp94Ucn6Vm36Wkrt+tmtlDb1aiNZCY=
go.opentelemetry.io/collector/semconv v0.101.0 h1:tOe9iTe9dDCnvz/bqgfNRr4w80kXG8505tQJ5h5v08Q=
go.opentelemetry.io/collector/semconv v0.101.0/go.mod h1:8ElcRZ8Cdw5JnvhTOQOdYizkJaQ10Z2fS+R6djOnj6A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@ -770,9 +771,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -807,8 +809,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -852,17 +854,18 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -876,8 +879,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -941,16 +944,18 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -958,17 +963,16 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1021,8 +1025,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -1042,8 +1046,8 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY=
google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg=
google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE=
google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1051,8 +1055,6 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -1082,10 +1084,10 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -1104,8 +1106,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1117,9 +1119,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1167,8 +1168,8 @@ k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=

View file

@ -30,11 +30,12 @@ import (
type FloatHistogram struct {
// Counter reset information.
CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets. Or
// in other words, each bucket boundary is the previous boundary times
// 2^(2^-n).
// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
// each case, and then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times
// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
// the CustomValues field.
Schema int32
// Width of the zero bucket.
ZeroThreshold float64
@ -49,6 +50,16 @@ type FloatHistogram struct {
// Observation counts in buckets. Each represents an absolute count and
// must be zero or positive.
PositiveBuckets, NegativeBuckets []float64
// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
// This slice is interned, to be treated as immutable and copied by reference.
// These numbers should be strictly increasing. This field is only used when the
// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
// and NegativeBuckets fields are not used in that case.
CustomValues []float64
}
func (h *FloatHistogram) UsesCustomBuckets() bool {
return IsCustomBucketsSchema(h.Schema)
}
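Together, the -53 schema and the new CustomValues field turn a FloatHistogram into a custom-buckets histogram: the listed bounds delimit the positive buckets, plus an implicit (last bound, +Inf] overflow bucket. A minimal construction sketch, assuming the package's usual import path (not shown in this excerpt):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	// Three strictly increasing bounds define four buckets:
	// [-Inf,1], (1,5], (5,10] and the implicit overflow bucket (10,+Inf].
	fh := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema, // -53
		Count:           7,
		Sum:             23.5,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}},
		PositiveBuckets: []float64{2, 3, 1, 1}, // absolute counts, one per bucket
		CustomValues:    []float64{1, 5, 10},   // interned, treated as immutable
	}
	fmt.Println(fh.UsesCustomBuckets()) // true
	fmt.Println(fh.Validate())          // <nil>: spans, buckets and bounds are consistent
}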
// Copy returns a deep copy of the Histogram.
@ -56,28 +67,37 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
c := FloatHistogram{
CounterResetHint: h.CounterResetHint,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.ZeroCount,
Count: h.Count,
Sum: h.Sum,
}
if h.UsesCustomBuckets() {
if len(h.CustomValues) != 0 {
c.CustomValues = make([]float64, len(h.CustomValues))
copy(c.CustomValues, h.CustomValues)
}
} else {
c.ZeroThreshold = h.ZeroThreshold
c.ZeroCount = h.ZeroCount
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
}
if len(h.PositiveSpans) != 0 {
c.PositiveSpans = make([]Span, len(h.PositiveSpans))
copy(c.PositiveSpans, h.PositiveSpans)
}
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.PositiveBuckets) != 0 {
c.PositiveBuckets = make([]float64, len(h.PositiveBuckets))
copy(c.PositiveBuckets, h.PositiveBuckets)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]float64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
return &c
}
@ -87,32 +107,53 @@ func (h *FloatHistogram) Copy() *FloatHistogram {
func (h *FloatHistogram) CopyTo(to *FloatHistogram) {
to.CounterResetHint = h.CounterResetHint
to.Schema = h.Schema
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.Count = h.Count
to.Sum = h.Sum
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans)
if h.UsesCustomBuckets() {
to.ZeroThreshold = 0
to.ZeroCount = 0
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
copy(to.CustomValues, h.CustomValues)
} else {
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
to.CustomValues = clearIfNotNil(to.CustomValues)
}
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets)
}
// CopyToSchema works like Copy, but the returned deep copy has the provided
// target schema, which must be ≤ the original schema (i.e. it must have a lower
// resolution).
// resolution). This method panics if a custom buckets schema is used in the
// receiving FloatHistogram or as the provided targetSchema.
func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram {
if targetSchema == h.Schema {
// Fast path.
return h.Copy()
}
if h.UsesCustomBuckets() {
panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema))
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema > h.Schema {
panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema))
}
@ -185,6 +226,9 @@ func (h *FloatHistogram) TestExpression() string {
if m.ZeroThreshold != 0 {
res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold))
}
if m.UsesCustomBuckets() {
res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues))
}
addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string {
if len(spans) > 1 {
@ -210,14 +254,18 @@ func (h *FloatHistogram) TestExpression() string {
return "{{" + strings.Join(res, " ") + "}}"
}
// ZeroBucket returns the zero bucket.
// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
func (h *FloatHistogram) ZeroBucket() Bucket[float64] {
if h.UsesCustomBuckets() {
panic("histograms with custom buckets have no zero bucket")
}
return Bucket[float64]{
Lower: -h.ZeroThreshold,
Upper: h.ZeroThreshold,
LowerInclusive: true,
UpperInclusive: true,
Count: h.ZeroCount,
// Index is irrelevant for the zero bucket.
}
}
@ -263,9 +311,18 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram {
//
// The method reconciles differences in the zero threshold and in the schema, and
// changes them if needed. The other histogram will not be modified in any case.
// Adding is currently only supported between 2 exponential histograms, or between
// 2 custom buckets histograms with the exact same custom bounds.
//
// This method returns a pointer to the receiving histogram for convenience.
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) {
if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
return nil, ErrHistogramsIncompatibleSchema
}
if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
return nil, ErrHistogramsIncompatibleBounds
}
switch {
case other.CounterResetHint == h.CounterResetHint:
// Adding apples to apples, all good. No need to change anything.
@ -290,19 +347,28 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
// TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place
}
if !h.UsesCustomBuckets() {
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount += otherZeroCount
}
h.Count += other.Count
h.Sum += other.Sum
var (
hPositiveSpans = h.PositiveSpans
hPositiveBuckets = h.PositiveBuckets
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherPositiveSpans = other.PositiveSpans
otherPositiveBuckets = other.PositiveBuckets
)
if h.UsesCustomBuckets() {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
return h, nil
}
var (
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherNegativeSpans = other.NegativeSpans
otherNegativeBuckets = other.NegativeBuckets
)
@ -321,24 +387,40 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
return h
return h, nil
}
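Add and Sub now return an error so that mixing exponential with custom-buckets histograms, or custom-buckets histograms with different bounds, is reported rather than silently merged. A hedged sketch of the new call pattern, relying on the sentinel errors declared later in this diff (import path assumed):

package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	a := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           3,
		Sum:             4,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{1, 1, 1},
		CustomValues:    []float64{1, 2},
	}
	b := a.Copy()
	b.CustomValues = []float64{1, 3} // same schema, different bounds

	// Mixing different custom bounds is now reported instead of silently merged.
	if _, err := a.Copy().Add(b); errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) {
		fmt.Println("cannot add histograms with different custom bounds")
	}

	// With matching bounds, Add mutates and returns the receiver as before.
	sum, err := a.Copy().Add(a)
	fmt.Println(sum.Count, err) // 6 <nil>
}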
// Sub works like Add but subtracts the other histogram.
func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) {
if h.UsesCustomBuckets() != other.UsesCustomBuckets() {
return nil, ErrHistogramsIncompatibleSchema
}
if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) {
return nil, ErrHistogramsIncompatibleBounds
}
if !h.UsesCustomBuckets() {
otherZeroCount := h.reconcileZeroBuckets(other)
h.ZeroCount -= otherZeroCount
}
h.Count -= other.Count
h.Sum -= other.Sum
var (
hPositiveSpans = h.PositiveSpans
hPositiveBuckets = h.PositiveBuckets
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherPositiveSpans = other.PositiveSpans
otherPositiveBuckets = other.PositiveBuckets
)
if h.UsesCustomBuckets() {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
return h, nil
}
var (
hNegativeSpans = h.NegativeSpans
hNegativeBuckets = h.NegativeBuckets
otherNegativeSpans = other.NegativeSpans
otherNegativeBuckets = other.NegativeBuckets
)
@ -356,7 +438,7 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets)
h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets)
return h
return h, nil
}
// Equals returns true if the given float histogram matches exactly.
@ -365,29 +447,42 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram {
// but they must represent the same bucket layout to match.
// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns
// because this method is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool {
if h2 == nil {
return false
}
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) ||
if h.Schema != h2.Schema ||
math.Float64bits(h.Count) != math.Float64bits(h2.Count) ||
math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
return false
}
if h.UsesCustomBuckets() {
if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
return false
}
}
if h.ZeroThreshold != h2.ZeroThreshold ||
math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) {
if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
@ -403,6 +498,7 @@ func (h *FloatHistogram) Size() int {
negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32).
posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64).
negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64).
customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64).
// Total size of the struct.
@ -417,9 +513,10 @@ func (h *FloatHistogram) Size() int {
// fh.NegativeSpans is 24 bytes.
// fh.PositiveBuckets is 24 bytes.
// fh.NegativeBuckets is 24 bytes.
structSize := 144
// fh.CustomValues is 24 bytes.
structSize := 168
return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize
return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize
}
// Compact eliminates empty buckets at the beginning and end of each span, then
@ -504,6 +601,12 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool {
if h.Count < previous.Count {
return true
}
if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) {
// Mark that something has changed or that the application has been restarted. However, this does
// not matter so much since the change in schema will be handled directly in the chunks and PromQL
// functions.
return true
}
if h.Schema > previous.Schema {
return true
}
@ -609,7 +712,7 @@ func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] {
// positive buckets in descending order (starting at the highest bucket and
// going down towards the zero bucket).
func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] {
it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
return &it
}
@ -617,7 +720,7 @@ func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64]
// negative buckets in ascending order (starting at the lowest bucket and going
// up towards the zero bucket).
func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] {
it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
return &it
}
@ -629,7 +732,7 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64]
func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
return &allFloatBucketIterator{
h: h,
leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false),
leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil),
rightIter: h.floatBucketIterator(true, 0, h.Schema),
state: -1,
}
@ -643,30 +746,52 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] {
func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] {
return &allFloatBucketIterator{
h: h,
leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true),
leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues),
rightIter: h.floatBucketIterator(false, 0, h.Schema),
state: -1,
}
}
// Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values.
// against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// We do not check for h.Count being at least as large as the sum of the
// counts in the buckets because floating point precision issues can
// create false positives here.
func (h *FloatHistogram) Validate() error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
var nCount, pCount float64
if h.UsesCustomBuckets() {
if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
var nCount, pCount float64
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
if err != nil {
return fmt.Errorf("positive side: %w", err)
}
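Besides checking the bounds themselves, the custom-buckets branch of Validate insists that the zero bucket and the entire negative side remain unused. A small sketch of one such rejection (import path assumed):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	fh := &histogram.FloatHistogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           2,
		Sum:             3,
		ZeroCount:       1, // not allowed together with custom buckets
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 1},
		CustomValues:    []float64{10},
	}
	fmt.Println(fh.Validate()) // custom buckets: must have zero count of 0
}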
@ -790,17 +915,25 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
// If positive is true, the returned iterator iterates through the positive
// buckets, otherwise through the negative buckets.
//
// If absoluteStartValue is < the lowest absolute value of any upper bucket
// boundary, the iterator starts with the first bucket. Otherwise, it will skip
// all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue.
// Only for exponential schemas, if absoluteStartValue is < the lowest absolute
// value of any upper bucket boundary, the iterator starts with the first bucket.
// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
// no buckets are skipped.
//
// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
// legal values for schemas in general). The buckets are merged to match the
// targetSchema prior to iterating (without mutating FloatHistogram).
// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
// schemas cannot be merged with other schemas.
func (h *FloatHistogram) floatBucketIterator(
positive bool, absoluteStartValue float64, targetSchema int32,
) floatBucketIterator {
if h.UsesCustomBuckets() && targetSchema != h.Schema {
panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
}
if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
}
if targetSchema > h.Schema {
panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
}
@ -816,6 +949,7 @@ func (h *FloatHistogram) floatBucketIterator(
if positive {
i.spans = h.PositiveSpans
i.buckets = h.PositiveBuckets
i.customValues = h.CustomValues
} else {
i.spans = h.NegativeSpans
i.buckets = h.NegativeBuckets
@ -825,7 +959,7 @@ func (h *FloatHistogram) floatBucketIterator(
// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
func newReverseFloatBucketIterator(
spans []Span, buckets []float64, schema int32, positive bool,
spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
) reverseFloatBucketIterator {
r := reverseFloatBucketIterator{
baseBucketIterator: baseBucketIterator[float64, float64]{
@ -833,6 +967,7 @@ func newReverseFloatBucketIterator(
spans: spans,
buckets: buckets,
positive: positive,
customValues: customValues,
},
}
@ -946,9 +1081,9 @@ func (i *floatBucketIterator) Next() bool {
}
}
// Skip buckets before absoluteStartValue.
// Skip buckets before absoluteStartValue for exponential schemas.
// TODO(beorn7): Maybe do something more efficient than this recursive call.
if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue {
return i.Next()
}
i.boundReachedStartValue = true
@ -1010,14 +1145,7 @@ func (i *allFloatBucketIterator) Next() bool {
case 0:
i.state = 1
if i.h.ZeroCount > 0 {
i.currBucket = Bucket[float64]{
Lower: -i.h.ZeroThreshold,
Upper: i.h.ZeroThreshold,
LowerInclusive: true,
UpperInclusive: true,
Count: i.h.ZeroCount,
// Index is irrelevant for the zero bucket.
}
i.currBucket = i.h.ZeroBucket()
return true
}
return i.Next()
@ -1076,7 +1204,7 @@ func addBuckets(
for _, spanB := range spansB {
indexB += spanB.Offset
for j := 0; j < int(spanB.Length); j++ {
if lowerThanThreshold && getBound(indexB, schema) <= threshold {
if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold {
goto nextLoop
}
lowerThanThreshold = false
@ -1177,7 +1305,7 @@ func addBuckets(
return spansA, bucketsA
}
func floatBucketsMatch(b1, b2 []float64) bool {
func FloatBucketsMatch(b1, b2 []float64) bool {
if len(b1) != len(b2) {
return false
}
@ -1191,7 +1319,15 @@ func floatBucketsMatch(b1, b2 []float64) bool {
// ReduceResolution reduces the float histogram's spans, buckets into target schema.
// The target schema must be smaller than the current float histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
// a custom buckets schema.
func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram {
if h.UsesCustomBuckets() {
panic("cannot reduce resolution when there are custom buckets")
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema >= h.Schema {
panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
}

File diff suppressed because it is too large


@ -20,14 +20,33 @@ import (
"strings"
)
const (
ExponentialSchemaMax int32 = 8
ExponentialSchemaMin int32 = -4
CustomBucketsSchema int32 = -53
)
var (
ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets")
ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)")
ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative")
ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative")
ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided")
ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few")
ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order")
ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite")
ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas")
ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds")
)
func IsCustomBucketsSchema(s int32) bool {
return s == CustomBucketsSchema
}
func IsExponentialSchema(s int32) bool {
return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax
}
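These exported constants and predicates are what the rest of this diff keys on to tell the two bucket layouts apart. A quick illustration (import path assumed):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	fmt.Println(histogram.ExponentialSchemaMin, histogram.ExponentialSchemaMax) // -4 8
	fmt.Println(histogram.CustomBucketsSchema)                                  // -53
	fmt.Println(histogram.IsExponentialSchema(3))                               // true
	fmt.Println(histogram.IsExponentialSchema(histogram.CustomBucketsSchema))   // false
	fmt.Println(histogram.IsCustomBucketsSchema(-53))                           // true
}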
// BucketCount is a type constraint for the count in a bucket, which can be
// float64 (for type FloatHistogram) or uint64 (for type Histogram).
type BucketCount interface {
@ -115,6 +134,8 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct {
currCount IBC // Count in the current bucket.
currIdx int32 // The actual bucket index.
customValues []float64 // Bounds (usually upper) for histograms with custom buckets.
}
func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] {
@ -128,14 +149,19 @@ func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] {
Index: b.currIdx,
}
if b.positive {
bucket.Upper = getBound(b.currIdx, schema)
bucket.Lower = getBound(b.currIdx-1, schema)
bucket.Upper = getBound(b.currIdx, schema, b.customValues)
bucket.Lower = getBound(b.currIdx-1, schema, b.customValues)
} else {
bucket.Lower = -getBound(b.currIdx, schema)
bucket.Upper = -getBound(b.currIdx-1, schema)
bucket.Lower = -getBound(b.currIdx, schema, b.customValues)
bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues)
}
if IsCustomBucketsSchema(schema) {
bucket.LowerInclusive = b.currIdx == 0
bucket.UpperInclusive = true
} else {
bucket.LowerInclusive = bucket.Lower < 0
bucket.UpperInclusive = bucket.Upper > 0
}
return bucket
}
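Under the custom-buckets schema only the first bucket is closed on the left (it starts at -Inf) and every bucket is closed on the right, unlike the sign-based rule used for exponential buckets. A sketch iterating an integer custom-buckets histogram to show the resulting bounds; the layout mirrors a test case added later in this diff (import path assumed):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           4,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 4}},
		PositiveBuckets: []int64{1, 0, 0, 0}, // deltas: one observation per bucket
		CustomValues:    []float64{5, 10, 20},
	}
	it := h.PositiveBucketIterator()
	for it.Next() {
		b := it.At()
		fmt.Printf("lower=%g upper=%g lowerIncl=%v upperIncl=%v count=%d\n",
			b.Lower, b.Upper, b.LowerInclusive, b.UpperInclusive, b.Count)
	}
	// First line: lower=-Inf upper=5 lowerIncl=true upperIncl=true count=1
	// Last line:  lower=20 upper=+Inf lowerIncl=false upperIncl=true count=1
}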
@ -393,7 +419,55 @@ func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IB
return nil
}
func getBound(idx, schema int32) float64 {
func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error {
prev := math.Inf(-1)
for _, curr := range bounds {
if curr <= prev {
return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid)
}
prev = curr
}
if prev == math.Inf(1) {
return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite)
}
var spanBuckets int
var totalSpanLength int
for n, span := range spans {
if span.Offset < 0 {
return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset)
}
spanBuckets += int(span.Length)
totalSpanLength += int(span.Length) + int(span.Offset)
}
if spanBuckets != numBuckets {
return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch)
}
if (len(bounds) + 1) < totalSpanLength {
return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch)
}
return nil
}
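The sizing rule here is that the spans may address at most len(bounds)+1 buckets, because the final (..., +Inf] bucket needs no explicit bound. Since the helper is unexported, the following standalone sketch only mirrors that arithmetic, using numbers from the test cases added later in this diff:

package main

import "fmt"

func main() {
	type span struct{ offset, length int }
	spans := []span{{0, 2}, {1, 2}} // two spans, total length 5 including the gap
	bounds := []float64{1, 2, 3, 4} // e.g. the CustomValues of a valid test histogram

	total := 0
	for _, s := range spans {
		total += s.offset + s.length
	}
	fmt.Println(total, len(bounds)+1, total <= len(bounds)+1) // 5 5 true, so enough bounds
}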
func getBound(idx, schema int32, customValues []float64) float64 {
if IsCustomBucketsSchema(schema) {
length := int32(len(customValues))
switch {
case idx > length || idx < -1:
panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length))
case idx == length:
return math.Inf(1)
case idx == -1:
return math.Inf(-1)
default:
return customValues[idx]
}
}
return getBoundExponential(idx, schema)
}
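For the custom-buckets schema, index -1 maps to -Inf, index len(customValues) maps to +Inf, and anything in between maps straight to the stored bound; out-of-range indexes panic. getBound is unexported, so this is a standalone sketch (hypothetical helper name) mirroring only the in-range mapping:

package main

import (
	"fmt"
	"math"
)

// customBound mirrors the custom-buckets branch of getBound above
// (hypothetical helper; the real one also handles the panic cases).
func customBound(idx int, customValues []float64) float64 {
	switch {
	case idx == -1:
		return math.Inf(-1)
	case idx == len(customValues):
		return math.Inf(1)
	default:
		return customValues[idx]
	}
}

func main() {
	bounds := []float64{5, 10, 20, 50}
	for idx := -1; idx <= len(bounds); idx++ {
		fmt.Println(idx, customBound(idx, bounds))
	}
	// Prints: -1 -Inf, 0 5, 1 10, 2 20, 3 50, 4 +Inf
}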
func getBoundExponential(idx, schema int32) float64 {
// Here a bit of context about the behavior for the last bucket counting
// regular numbers (called simply "last bucket" below) and the bucket
// counting observations of ±Inf (called "inf bucket" below, with an idx
@ -422,7 +496,7 @@ func getBound(idx, schema int32) float64 {
// bucket results in precisely that. It is either frac=1.0 & exp=1024
// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
// by the way, a power of two where the exponent itself is a power of
// two, 2¹⁰ in fact, which coinicides with a bucket boundary in all
// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
// schemas.) So these are the special cases we have to catch below.
if schema < 0 {
exp := int(idx) << -schema
@ -703,3 +777,10 @@ func reduceResolution[IBC InternalBucketCount](
return targetSpans, targetBuckets
}
func clearIfNotNil[T any](items []T) []T {
if items == nil {
return nil
}
return items[:0]
}


@ -21,7 +21,7 @@ import (
"github.com/stretchr/testify/require"
)
func TestGetBound(t *testing.T) {
func TestGetBoundExponential(t *testing.T) {
scenarios := []struct {
idx int32
schema int32
@ -105,7 +105,7 @@ func TestGetBound(t *testing.T) {
}
for _, s := range scenarios {
got := getBound(s.idx, s.schema)
got := getBoundExponential(s.idx, s.schema)
if s.want != got {
require.Equal(t, s.want, got, "idx %d, schema %d", s.idx, s.schema)
}


@ -49,11 +49,12 @@ const (
type Histogram struct {
// Counter reset information.
CounterResetHint CounterResetHint
// Currently valid schema numbers are -4 <= n <= 8. They are all for
// base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets. Or
// in other words, each bucket boundary is the previous boundary times
// 2^(2^-n).
// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
// each case, and then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times
// 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by
// the CustomValues field.
Schema int32
// Width of the zero bucket.
ZeroThreshold float64
@ -69,6 +70,12 @@ type Histogram struct {
// count. All following ones are deltas relative to the previous
// element.
PositiveBuckets, NegativeBuckets []int64
// Holds the custom (usually upper) bounds for bucket definitions, otherwise nil.
// This slice is interned, to be treated as immutable and copied by reference.
// These numbers should be strictly increasing. This field is only used when the
// schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans
// and NegativeBuckets fields are not used in that case.
CustomValues []float64
}
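As with exponential buckets, the integer Histogram keeps PositiveBuckets delta-encoded: the first value is absolute and each following value is relative to the previous bucket, while CustomValues carries the bounds verbatim. A tiny sketch decoding the deltas used by one of the test cases added later in this diff:

package main

import "fmt"

func main() {
	deltas := []int64{1, 2, -2, 1, -1, 0, 0} // as stored in PositiveBuckets
	abs := make([]int64, 0, len(deltas))
	var cur int64
	for _, d := range deltas {
		cur += d
		abs = append(abs, cur)
	}
	fmt.Println(abs) // [1 3 1 2 1 1 1], i.e. the counts shown in the rendered string
}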
// A Span defines a continuous sequence of buckets.
@ -80,33 +87,46 @@ type Span struct {
Length uint32
}
func (h *Histogram) UsesCustomBuckets() bool {
return IsCustomBucketsSchema(h.Schema)
}
// Copy returns a deep copy of the Histogram.
func (h *Histogram) Copy() *Histogram {
c := Histogram{
CounterResetHint: h.CounterResetHint,
Schema: h.Schema,
ZeroThreshold: h.ZeroThreshold,
ZeroCount: h.ZeroCount,
Count: h.Count,
Sum: h.Sum,
}
if h.UsesCustomBuckets() {
if len(h.CustomValues) != 0 {
c.CustomValues = make([]float64, len(h.CustomValues))
copy(c.CustomValues, h.CustomValues)
}
} else {
c.ZeroThreshold = h.ZeroThreshold
c.ZeroCount = h.ZeroCount
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
}
if len(h.PositiveSpans) != 0 {
c.PositiveSpans = make([]Span, len(h.PositiveSpans))
copy(c.PositiveSpans, h.PositiveSpans)
}
if len(h.NegativeSpans) != 0 {
c.NegativeSpans = make([]Span, len(h.NegativeSpans))
copy(c.NegativeSpans, h.NegativeSpans)
}
if len(h.PositiveBuckets) != 0 {
c.PositiveBuckets = make([]int64, len(h.PositiveBuckets))
copy(c.PositiveBuckets, h.PositiveBuckets)
}
if len(h.NegativeBuckets) != 0 {
c.NegativeBuckets = make([]int64, len(h.NegativeBuckets))
copy(c.NegativeBuckets, h.NegativeBuckets)
}
return &c
}
@ -116,22 +136,36 @@ func (h *Histogram) Copy() *Histogram {
func (h *Histogram) CopyTo(to *Histogram) {
to.CounterResetHint = h.CounterResetHint
to.Schema = h.Schema
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.Count = h.Count
to.Sum = h.Sum
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans)
if h.UsesCustomBuckets() {
to.ZeroThreshold = 0
to.ZeroCount = 0
to.NegativeSpans = clearIfNotNil(to.NegativeSpans)
to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets)
to.CustomValues = resize(to.CustomValues, len(h.CustomValues))
copy(to.CustomValues, h.CustomValues)
} else {
to.ZeroThreshold = h.ZeroThreshold
to.ZeroCount = h.ZeroCount
to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans))
copy(to.NegativeSpans, h.NegativeSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets)
to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets))
copy(to.NegativeBuckets, h.NegativeBuckets)
to.CustomValues = clearIfNotNil(to.CustomValues)
}
to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans))
copy(to.PositiveSpans, h.PositiveSpans)
to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets))
copy(to.PositiveBuckets, h.PositiveBuckets)
}
// String returns a string representation of the Histogram.
@ -165,8 +199,11 @@ func (h *Histogram) String() string {
return sb.String()
}
// ZeroBucket returns the zero bucket.
// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets.
func (h *Histogram) ZeroBucket() Bucket[uint64] {
if h.UsesCustomBuckets() {
panic("histograms with custom buckets have no zero bucket")
}
return Bucket[uint64]{
Lower: -h.ZeroThreshold,
Upper: h.ZeroThreshold,
@ -179,14 +216,14 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] {
// PositiveBucketIterator returns a BucketIterator to iterate over all positive
// buckets in ascending order (starting next to the zero bucket and going up).
func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] {
it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true)
it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues)
return &it
}
// NegativeBucketIterator returns a BucketIterator to iterate over all negative
// buckets in descending order (starting next to the zero bucket and going down).
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] {
it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false)
it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil)
return &it
}
@ -207,30 +244,42 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] {
// but they must represent the same bucket layout to match.
// Sum is compared based on its bit pattern because this method
// is about data equality rather than mathematical equality.
// We ignore fields that are not used based on the exponential / custom buckets schema,
// but check fields where differences may cause unintended behaviour even if they are not
// supposed to be used according to the schema.
func (h *Histogram) Equals(h2 *Histogram) bool {
if h2 == nil {
return false
}
if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold ||
h.ZeroCount != h2.ZeroCount || h.Count != h2.Count ||
if h.Schema != h2.Schema || h.Count != h2.Count ||
math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
return false
}
if h.UsesCustomBuckets() {
if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
return false
}
}
if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
return false
}
if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
return false
}
if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
return false
}
if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
return false
}
return true
}
@ -321,30 +370,42 @@ func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram {
}
fh.CounterResetHint = h.CounterResetHint
fh.Schema = h.Schema
fh.ZeroThreshold = h.ZeroThreshold
fh.ZeroCount = float64(h.ZeroCount)
fh.Count = float64(h.Count)
fh.Sum = h.Sum
fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
copy(fh.PositiveSpans, h.PositiveSpans)
if h.UsesCustomBuckets() {
fh.ZeroThreshold = 0
fh.ZeroCount = 0
fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans)
fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets)
fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues))
copy(fh.CustomValues, h.CustomValues)
} else {
fh.ZeroThreshold = h.ZeroThreshold
fh.ZeroCount = float64(h.ZeroCount)
fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans))
copy(fh.NegativeSpans, h.NegativeSpans)
fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
var currentPositive float64
for i, b := range h.PositiveBuckets {
currentPositive += float64(b)
fh.PositiveBuckets[i] = currentPositive
}
fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets))
var currentNegative float64
for i, b := range h.NegativeBuckets {
currentNegative += float64(b)
fh.NegativeBuckets[i] = currentNegative
}
fh.CustomValues = clearIfNotNil(fh.CustomValues)
}
fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans))
copy(fh.PositiveSpans, h.PositiveSpans)
fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets))
var currentPositive float64
for i, b := range h.PositiveBuckets {
currentPositive += float64(b)
fh.PositiveBuckets[i] = currentPositive
}
return fh
}
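ToFloat resolves the delta-encoded integer buckets into absolute float counts and now carries CustomValues across while clearing the unused zero-bucket and negative fields; a non-nil target has its slices reused. A usage sketch (import path assumed):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.Histogram{
		Schema:          histogram.CustomBucketsSchema,
		Count:           4,
		Sum:             12,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []int64{2, -1, 0}, // absolute counts 2, 1, 1
		CustomValues:    []float64{1, 10},
	}

	fh := h.ToFloat(nil)                                 // allocates a fresh FloatHistogram
	fmt.Println(fh.PositiveBuckets)                      // [2 1 1]
	fmt.Println(fh.UsesCustomBuckets(), fh.CustomValues) // true [1 10]

	reused := &histogram.FloatHistogram{}
	_ = h.ToFloat(reused) // reuses the target's slices where possible
}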
@ -357,25 +418,47 @@ func resize[T any](items []T, n int) []T {
}
// Validate validates consistency between span and bucket slices. Also, buckets are checked
// against negative values.
// against negative values. We check to make sure there are no unexpected fields or field values
// based on the exponential / custom buckets schema.
// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a
// strict h.Count = nCount + pCount + h.ZeroCount check is performed.
// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount),
// because NaN observations do not increment the values of buckets (but they do increment
// the total h.Count).
func (h *Histogram) Validate() error {
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
var nCount, pCount uint64
if h.UsesCustomBuckets() {
if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("custom buckets: %w", err)
}
if h.ZeroCount != 0 {
return fmt.Errorf("custom buckets: must have zero count of 0")
}
if h.ZeroThreshold != 0 {
return fmt.Errorf("custom buckets: must have zero threshold of 0")
}
if len(h.NegativeSpans) > 0 {
return fmt.Errorf("custom buckets: must not have negative spans")
}
if len(h.NegativeBuckets) > 0 {
return fmt.Errorf("custom buckets: must not have negative buckets")
}
} else {
if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
return fmt.Errorf("positive side: %w", err)
}
var nCount, pCount uint64
if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
return fmt.Errorf("negative side: %w", err)
}
err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true)
if err != nil {
return fmt.Errorf("negative side: %w", err)
}
err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if h.CustomValues != nil {
return fmt.Errorf("histogram with exponential schema must not have custom bounds")
}
}
err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true)
if err != nil {
return fmt.Errorf("positive side: %w", err)
}
@ -398,12 +481,13 @@ type regularBucketIterator struct {
baseBucketIterator[uint64, int64]
}
func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator {
func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator {
i := baseBucketIterator[uint64, int64]{
schema: schema,
spans: spans,
buckets: buckets,
positive: positive,
customValues: customValues,
}
return regularBucketIterator{i}
}
@ -477,7 +561,7 @@ func (c *cumulativeBucketIterator) Next() bool {
if c.emptyBucketCount > 0 {
// We are traversing through empty buckets at the moment.
c.currUpper = getBound(c.currIdx, c.h.Schema)
c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
c.currIdx++
c.emptyBucketCount--
return true
@ -494,7 +578,7 @@ func (c *cumulativeBucketIterator) Next() bool {
c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
c.currCumulativeCount += uint64(c.currCount)
c.currUpper = getBound(c.currIdx, c.h.Schema)
c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
c.posBucketsIdx++
c.idxInSpan++
@ -524,7 +608,15 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] {
// ReduceResolution reduces the histogram's spans, buckets into target schema.
// The target schema must be smaller than the current histogram's schema.
// This will panic if the histogram has custom buckets or if the target schema is
// a custom buckets schema.
func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
if h.UsesCustomBuckets() {
panic("cannot reduce resolution when there are custom buckets")
}
if IsCustomBucketsSchema(targetSchema) {
panic("cannot reduce resolution to custom buckets schema")
}
if targetSchema >= h.Schema {
panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
}


@ -14,8 +14,8 @@
package histogram
import (
"fmt"
"math"
"strconv"
"testing"
"github.com/stretchr/testify/require"
@ -69,10 +69,25 @@ func TestHistogramString(t *testing.T) {
},
expectedString: "{count:19, sum:2.7, [-64,-32):1, [-16,-8):1, [-8,-4):2, [-4,-2):1, [-2,-1):3, [-1,-0.5):1, (0.5,1]:1, (1,2]:3, (2,4]:1, (4,8]:2, (8,16]:1, (16,32]:1, (32,64]:1}",
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
Count: 19,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{1, 2, 5, 10, 15, 20, 25, 50},
},
expectedString: "{count:19, sum:2.7, [-Inf,1]:1, (1,2]:3, (2,5]:1, (5,10]:2, (10,15]:1, (15,20]:1, (20,25]:1}",
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
actualString := c.histogram.String()
require.Equal(t, c.expectedString, actualString)
})
@ -208,10 +223,30 @@ func TestCumulativeBucketIterator(t *testing.T) {
{Lower: math.Inf(-1), Upper: 16, Count: 8, LowerInclusive: true, UpperInclusive: true, Index: 2},
},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{5, 10, 20, 50},
},
expectedBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: math.Inf(-1), Upper: 10, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 1},
{Lower: math.Inf(-1), Upper: 20, Count: 3, LowerInclusive: true, UpperInclusive: true, Index: 2},
{Lower: math.Inf(-1), Upper: 50, Count: 4, LowerInclusive: true, UpperInclusive: true, Index: 3},
{Lower: math.Inf(-1), Upper: math.Inf(1), Count: 5, LowerInclusive: true, UpperInclusive: true, Index: 4},
},
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.CumulativeBucketIterator()
actualBuckets := make([]Bucket[uint64], 0, len(c.expectedBuckets))
for it.Next() {
@ -368,10 +403,66 @@ func TestRegularBucketIterator(t *testing.T) {
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{5, 10, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: 5, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{0, 10, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: 0, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: 0, Upper: 10, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
{
histogram: Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 5},
},
PositiveBuckets: []int64{1, 1, 0, -1, 0},
CustomValues: []float64{-5, 0, 20, 50},
},
expectedPositiveBuckets: []Bucket[uint64]{
{Lower: math.Inf(-1), Upper: -5, Count: 1, LowerInclusive: true, UpperInclusive: true, Index: 0},
{Lower: -5, Upper: 0, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 1},
{Lower: 0, Upper: 20, Count: 2, LowerInclusive: false, UpperInclusive: true, Index: 2},
{Lower: 20, Upper: 50, Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 3},
{Lower: 50, Upper: math.Inf(1), Count: 1, LowerInclusive: false, UpperInclusive: true, Index: 4},
},
expectedNegativeBuckets: []Bucket[uint64]{},
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
t.Run(strconv.Itoa(i), func(t *testing.T) {
it := c.histogram.PositiveBucketIterator()
actualPositiveBuckets := make([]Bucket[uint64], 0, len(c.expectedPositiveBuckets))
for it.Next() {
@ -461,11 +552,81 @@ func TestHistogramToFloat(t *testing.T) {
}
}
func TestCustomBucketsHistogramToFloat(t *testing.T) {
h := Histogram{
Schema: CustomBucketsSchema,
Count: 10,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 0, Length: 0},
{Offset: 0, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{5, 10, 20, 50, 100, 500},
}
cases := []struct {
name string
fh *FloatHistogram
}{
{name: "without prior float histogram"},
{name: "prior float histogram with more buckets", fh: &FloatHistogram{
Schema: 2,
Count: 3,
Sum: 5,
ZeroThreshold: 4,
ZeroCount: 1,
PositiveSpans: []Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
NegativeSpans: []Span{
{Offset: 20, Length: 6},
{Offset: 12, Length: 7},
{Offset: 33, Length: 10},
},
NegativeBuckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9},
}},
{name: "prior float histogram with fewer buckets", fh: &FloatHistogram{
Schema: 2,
Count: 3,
Sum: 5,
ZeroThreshold: 4,
ZeroCount: 1,
PositiveSpans: []Span{
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []float64{1, 2},
NegativeSpans: []Span{
{Offset: 20, Length: 6},
{Offset: 12, Length: 7},
{Offset: 33, Length: 10},
},
NegativeBuckets: []float64{1, 2},
}},
}
require.NoError(t, h.Validate())
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hStr := h.String()
fh := h.ToFloat(c.fh)
require.NoError(t, fh.Validate())
require.Equal(t, hStr, h.String())
require.Equal(t, hStr, fh.String())
})
}
}
// TestHistogramEquals tests both Histogram and FloatHistogram.
func TestHistogramEquals(t *testing.T) {
h1 := Histogram{
Schema: 3,
Count: 61,
Count: 62,
Sum: 2.7,
ZeroThreshold: 0.1,
ZeroCount: 42,
@ -495,6 +656,15 @@ func TestHistogramEquals(t *testing.T) {
require.False(t, h1f.Equals(h2f))
require.False(t, h2f.Equals(h1f))
}
notEqualsUntilFloatConv := func(h1, h2 Histogram) {
require.False(t, h1.Equals(&h2))
require.False(t, h2.Equals(&h1))
h1f, h2f := h1.ToFloat(nil), h2.ToFloat(nil)
require.True(t, h1f.Equals(h2f))
require.True(t, h2f.Equals(h1f))
}
require.NoError(t, h1.Validate())
h2 := h1.Copy()
equals(h1, *h2)
@ -602,6 +772,45 @@ func TestHistogramEquals(t *testing.T) {
// Sum StaleNaN vs regular NaN.
notEquals(*hStale, *hNaN)
// Has non-empty custom bounds for exponential schema.
hCustom := h1.Copy()
hCustom.CustomValues = []float64{1, 2, 3}
equals(h1, *hCustom)
cbh1 := Histogram{
Schema: CustomBucketsSchema,
Count: 10,
Sum: 2.7,
PositiveSpans: []Span{
{Offset: 0, Length: 4},
{Offset: 10, Length: 3},
},
PositiveBuckets: []int64{1, 2, -2, 1, -1, 0, 0},
CustomValues: []float64{0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 200, 250, 500, 1000},
}
require.NoError(t, cbh1.Validate())
cbh2 := cbh1.Copy()
equals(cbh1, *cbh2)
// Has different custom bounds for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.CustomValues = []float64{0.1, 0.2, 0.5}
notEquals(cbh1, *cbh2)
// Has non-empty negative spans and buckets for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.NegativeSpans = []Span{{Offset: 0, Length: 1}}
cbh2.NegativeBuckets = []int64{1}
notEqualsUntilFloatConv(cbh1, *cbh2)
// Has non-zero zero count and threshold for custom buckets schema.
cbh2 = cbh1.Copy()
cbh2.ZeroThreshold = 0.1
cbh2.ZeroCount = 10
notEqualsUntilFloatConv(cbh1, *cbh2)
}
func TestHistogramCopy(t *testing.T) {
@ -640,6 +849,21 @@ func TestHistogramCopy(t *testing.T) {
},
expected: &Histogram{},
},
{
name: "with custom buckets",
orig: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
expected: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
},
}
for _, tcase := range cases {
@ -690,6 +914,21 @@ func TestHistogramCopyTo(t *testing.T) {
},
expected: &Histogram{},
},
{
name: "with custom buckets",
orig: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
expected: &Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
},
}
for _, tcase := range cases {
@ -971,6 +1210,86 @@ func TestHistogramCompact(t *testing.T) {
NegativeBuckets: []int64{2, 3},
},
},
{
"nothing should happen with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {2, 3}},
PositiveBuckets: []int64{1, 3, -3, 42},
CustomValues: []float64{5, 10, 15},
},
},
{
"eliminate zero offsets with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 1}, {0, 3}, {0, 1}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 5}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
{
"eliminate zero length with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {2, 0}, {3, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {5, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
{
"eliminate multiple zero length spans with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {2, 0}, {2, 0}, {2, 0}, {3, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {9, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
{
"cut empty buckets at start or end of spans, even in the middle, with custom buckets",
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-4, 6}, {3, 6}},
PositiveBuckets: []int64{0, 0, 1, 3, -4, 0, 1, 42, 3, -46, 0, 0},
CustomValues: []float64{5, 10, 15, 20},
},
0,
&Histogram{
Schema: CustomBucketsSchema,
PositiveSpans: []Span{{-2, 2}, {5, 3}},
PositiveBuckets: []int64{1, 3, -3, 42, 3},
CustomValues: []float64{5, 10, 15, 20},
},
},
}
for _, c := range cases {
@ -1107,6 +1426,145 @@ func TestHistogramValidation(t *testing.T) {
errMsg: `3 observations found in buckets, but the Count field is 2: histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)`,
skipFloat: true,
},
"rejects an exponential histogram with custom buckets schema": {
h: &Histogram{
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
},
errMsg: `custom buckets: only 0 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
},
"rejects a custom buckets histogram with exponential schema": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: 0,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `histogram with exponential schema must not have custom bounds`,
skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
},
"rejects a custom buckets histogram with zero/negative buckets": {
h: &Histogram{
Count: 12,
ZeroCount: 2,
ZeroThreshold: 0.001,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
NegativeSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
NegativeBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: must have zero count of 0`,
skipFloat: true, // Converting to float will remove the wrong fields so only the float version will pass validation
},
"rejects a custom buckets histogram with negative offset in first span": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: -1, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: span number 1 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a custom buckets histogram with negative offset in subsequent spans": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: -1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: span number 2 with offset -1: histogram has a span whose offset is negative`,
},
"rejects a custom buckets histogram with non-matching bucket counts": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1},
CustomValues: []float64{1, 2, 3, 4},
},
errMsg: `custom buckets: spans need 4 buckets, have 3 buckets: histogram spans specify different number of buckets than provided`,
},
"rejects a custom buckets histogram with too few bounds": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3},
},
errMsg: `custom buckets: only 3 custom bounds defined which is insufficient to cover total span length of 5: histogram custom bounds are too few`,
},
"valid custom buckets histogram": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4},
},
},
"valid custom buckets histogram with extra bounds": {
h: &Histogram{
Count: 5,
Sum: 19.4,
Schema: CustomBucketsSchema,
PositiveSpans: []Span{
{Offset: 0, Length: 2},
{Offset: 1, Length: 2},
},
PositiveBuckets: []int64{1, 1, -1, 0},
CustomValues: []float64{1, 2, 3, 4, 5, 6, 7, 8},
},
},
}
for testName, tc := range tests {

Some files were not shown because too many files have changed in this diff