Merge branch 'main' into mcarl/lint

commit 0512ebf9da
Author:     Julien <291750+roidelapluie@users.noreply.github.com>
Committed:  2024-05-24 14:56:51 +02:00, by GitHub
Signed-off-by: Julien <291750+roidelapluie@users.noreply.github.com>
GPG key ID: B5690EEEBB952194

483 changed files with 26962 additions and 31215 deletions

.github/CODEOWNERS

@@ -1,7 +1,7 @@
 /web/ui @juliusv
 /web/ui/module @juliusv @nexucis
-/storage/remote @csmarchbanks @cstyan @bwplotka @tomwilkie
-/storage/remote/otlptranslator @gouthamve @jesusvazquez
+/storage/remote @cstyan @bwplotka @tomwilkie
+/storage/remote/otlptranslator @aknuds1 @jesusvazquez
 /discovery/kubernetes @brancz
 /tsdb @jesusvazquez
 /promql @roidelapluie

.github/dependabot.yml

@@ -6,11 +6,11 @@ updates:
       interval: "monthly"
     groups:
       k8s.io:
         patterns:
           - "k8s.io/*"
       go.opentelemetry.io:
         patterns:
           - "go.opentelemetry.io/*"
   - package-ecosystem: "gomod"
     directory: "/documentation/examples/remote_storage"
     schedule:

.github/workflows/buf-lint.yml

@@ -12,14 +12,14 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
+      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
        with:
          input: 'prompb'
-      - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
+      - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4
        with:
          input: 'prompb'
          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD,subdir=prompb'

.github/workflows/buf.yml

@@ -12,18 +12,18 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: bufbuild/buf-setup-action@382440cdb8ec7bc25a68d7b4711163d95f7cc3aa # v1.28.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: bufbuild/buf-setup-action@517ee23296d5caf38df31c21945e6a54bbc8a89f # v1.30.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
-      - uses: bufbuild/buf-lint-action@bd48f53224baaaf0fc55de9a913e7680ca6dbea4 # v1.0.3
+      - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
        with:
          input: 'prompb'
-      - uses: bufbuild/buf-breaking-action@f47418c81c00bfd65394628385593542f64db477 # v1.1.2
+      - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4
        with:
          input: 'prompb'
          against: 'https://github.com/prometheus/prometheus.git#branch=main,ref=HEAD~1,subdir=prompb'
-      - uses: bufbuild/buf-push-action@342fc4cdcf29115a01cf12a2c6dd6aac68dc51e1 # v1.1.1
+      - uses: bufbuild/buf-push-action@a654ff18effe4641ebea4a4ce242c49800728459 # v1.1.1
        with:
          input: 'prompb'
          buf_token: ${{ secrets.BUF_TOKEN }}

.github/workflows/ci.yml

@@ -8,34 +8,56 @@ jobs:
   test_go:
     name: Go tests
     runs-on: ubuntu-latest
-    # Whenever the Go version is updated here, .promu.yml
-    # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.21-base
+      # Whenever the Go version is updated here, .promu.yml
+      # should also be updated.
+      image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
-      - run: make GO_ONLY=1 SKIP_GOLANGCI_LINT=1
-      - run: go test ./tsdb/ -test.tsdb-isolation=false
-      - run: go test --tags=stringlabels ./...
-      - run: GOARCH=386 go test ./cmd/prometheus
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
+      - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples
+
+  test_go_more:
+    name: More Go tests
+    runs-on: ubuntu-latest
+    container:
+      image: quay.io/prometheus/golang-builder:1.22-base
+    steps:
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
+      - uses: ./.github/promci/actions/setup_environment
+      - run: go test --tags=dedupelabels ./...
+      - run: GOARCH=386 go test ./cmd/prometheus
       - uses: ./.github/promci/actions/check_proto
         with:
           version: "3.15.8"
+
+  test_go_oldest:
+    name: Go tests with previous Go version
+    runs-on: ubuntu-latest
+    container:
+      # The go version in this image should be N-1 wrt test_go.
+      image: quay.io/prometheus/golang-builder:1.21-base
+    steps:
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - run: make build
+      # Don't run NPM build; don't run race-detector.
+      - run: make test GO_ONLY=1 test-flags=""
+
   test_ui:
     name: UI tests
     runs-on: ubuntu-latest
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.21-base
+      image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -52,36 +74,24 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
        with:
-          go-version: 1.21.x
+          go-version: 1.22.x
       - run: |
           $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
           go test $TestTargets -vet=off -v
        shell: powershell
-  test_golang_oldest:
-    name: Go tests with previous Go version
-    runs-on: ubuntu-latest
-    # The go verson in this image should be N-1 wrt test_go.
-    container:
-      image: quay.io/prometheus/golang-builder:1.20-base
-    steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - run: make build
-      - run: go test ./tsdb/...
-      - run: go test ./tsdb/ -test.tsdb-isolation=false
   test_mixins:
     name: Mixins tests
     runs-on: ubuntu-latest
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.20-base
+      image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -104,7 +114,7 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
@@ -127,32 +137,45 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12
           thread: ${{ matrix.thread }}
+  check_generated_parser:
+    name: Check generated parser
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - name: Install Go
+        uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
+        with:
+          cache: false
+          go-version: 1.22.x
+      - name: Run goyacc and check for diff
+        run: make install-goyacc check-generated-parser
   golangci:
     name: golangci-lint
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - name: Install Go
-        uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+        uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
         with:
           cache: false
-          go-version: 1.21.x
+          go-version: 1.22.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+        uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.55.2
+          version: v1.56.2
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'
@@ -162,10 +185,10 @@ jobs:
   publish_main:
     name: Publish main branch artifacts
     runs-on: ubuntu-latest
-    needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+    needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_main
         with:
@@ -176,10 +199,10 @@ jobs:
   publish_release:
     name: Publish release artefacts
     runs-on: ubuntu-latest
-    needs: [test_ui, test_go, test_windows, golangci, codeql, build_all]
+    needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - uses: ./.github/promci/actions/publish_release
         with:
@@ -194,14 +217,14 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - uses: prometheus/promci@3cb0c3871f223bd5ce1226995bd52ffb314798b6 # v0.1.0
       - name: Install nodejs
-        uses: actions/setup-node@5e21ff4d9bc1a8cf6de233a3057d20ec6b3fb69d # v3.8.1
+        uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2
        with:
          node-version-file: "web/ui/.nvmrc"
          registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+      - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
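Note: the test_go and test_go_more jobs above split coverage across the alternative label-storage implementations selected at compile time via Go build tags (`stringlabels`, `dedupelabels`). As a hedged illustration of the mechanism only (file and type names below are invented, not the actual Prometheus sources), mutually exclusive build tags let `go test --tags=...` swap one implementation for another:

```go
// labels_slice.go — compiled by default.
//go:build !stringlabels && !dedupelabels

package labels

// Label is a single name/value pair; Labels keeps pairs as a slice.
type Label struct{ Name, Value string }

type Labels []Label
```

```go
// labels_stringlabels.go — compiled only with `go test --tags=stringlabels`.
//go:build stringlabels

package labels

// Labels packs every pair into one backing string to cut allocations.
type Labels struct{ data string }
```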

.github/workflows/codeql-analysis.yml

@@ -20,22 +20,19 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        language: ["go", "javascript"]
+        language: ["javascript"]
     steps:
       - name: Checkout repository
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
-      - uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
-        with:
-          go-version: 1.21.x
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/init@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
         with:
           languages: ${{ matrix.language }}
       - name: Autobuild
-        uses: github/codeql-action/autobuild@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/autobuild@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@407ffafae6a767df3e0230c3df91b6443ae8df75 # v2.22.8
+        uses: github/codeql-action/analyze@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # v3.22.12

.github/workflows/container_description.yml (new file)

@@ -0,0 +1,52 @@
+---
+name: Push README to Docker Hub
+on:
+  push:
+    paths:
+      - "README.md"
+      - ".github/workflows/container_description.yml"
+    branches: [ main, master ]
+
+permissions:
+  contents: read
+
+jobs:
+  PushDockerHubReadme:
+    runs-on: ubuntu-latest
+    name: Push README to Docker Hub
+    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+    steps:
+      - name: git checkout
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - name: Set docker hub repo name
+        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
+      - name: Push README to Dockerhub
+        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
+        env:
+          DOCKER_USER: ${{ secrets.DOCKER_HUB_LOGIN }}
+          DOCKER_PASS: ${{ secrets.DOCKER_HUB_PASSWORD }}
+        with:
+          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
+          provider: dockerhub
+          short_description: ${{ env.DOCKER_REPO_NAME }}
+          readme_file: 'README.md'
+
+  PushQuayIoReadme:
+    runs-on: ubuntu-latest
+    name: Push README to quay.io
+    if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
+    steps:
+      - name: git checkout
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
+      - name: Set quay.io org name
+        run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
+      - name: Set quay.io repo name
+        run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
+      - name: Push README to quay.io
+        uses: christian-korneck/update-container-description-action@d36005551adeaba9698d8d67a296bd16fa91f8e8 # v1
+        env:
+          DOCKER_APIKEY: ${{ secrets.QUAY_IO_API_TOKEN }}
+        with:
+          destination_container_repo: ${{ env.DOCKER_REPO_NAME }}
+          provider: quay
+          readme_file: 'README.md'

.github/workflows/fuzzing.yml

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
        if: failure() && steps.build.outcome == 'success'
        with:
          name: artifacts

.github/workflows/repo_sync.yml

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+      - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4
       - run: ./scripts/sync_repo_files.sh
        env:
          GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

.github/workflows/scorecards.yml

@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # tag=v4.1.1
+        uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # tag=v4.1.4
         with:
           persist-credentials: false
@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # tag=v3.1.3
+        uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # tag=v4.3.3
         with:
           name: SARIF file
           path: results.sarif
@@ -45,6 +45,6 @@ jobs:
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@407ffafae6a767df3e0230c3df91b6443ae8df75 # tag=v2.22.8
+        uses: github/codeql-action/upload-sarif@012739e5082ff0c22ca6d6ab32e07c36df03c4a4 # tag=v3.22.12
         with:
           sarif_file: results.sarif

.golangci.yml

@@ -21,11 +21,14 @@ linters:
     - goimports
     - misspell
     - nolintlint
+    - perfsprint
     - predeclared
     - revive
     - testifylint
     - unconvert
     - unused
+    - usestdlibvars
+    - whitespace

 issues:
   max-same-issues: 0
@@ -42,24 +45,32 @@ issues:
     - linters:
         - godot
       source: "^// ==="
+    - linters:
+        - perfsprint
+      text: "fmt.Sprintf can be replaced with string addition"

 linters-settings:
   depguard:
     rules:
       main:
         deny:
           - pkg: "sync/atomic"
             desc: "Use go.uber.org/atomic instead of sync/atomic"
           - pkg: "github.com/stretchr/testify/assert"
             desc: "Use github.com/stretchr/testify/require instead of github.com/stretchr/testify/assert"
           - pkg: "github.com/go-kit/kit/log"
             desc: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
           - pkg: "io/ioutil"
             desc: "Use corresponding 'os' or 'io' functions instead."
           - pkg: "regexp"
             desc: "Use github.com/grafana/regexp instead of regexp"
           - pkg: "github.com/pkg/errors"
             desc: "Use 'errors' or 'fmt' instead of github.com/pkg/errors"
+          - pkg: "gzip"
+            desc: "Use github.com/klauspost/compress instead of gzip"
+          - pkg: "zlib"
+            desc: "Use github.com/klauspost/compress instead of zlib"
+          - pkg: "golang.org/x/exp/slices"
+            desc: "Use 'slices' instead."
   errcheck:
     exclude-functions:
       # Don't flag lines such as "io.Copy(io.Discard, resp.Body)".
@@ -77,6 +88,9 @@ linters-settings:
     local-prefixes: github.com/prometheus/prometheus
   gofumpt:
     extra-rules: true
+  perfsprint:
+    # Optimizes `fmt.Errorf`.
+    errorf: false
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly set in configuration all required rules.
@@ -129,4 +143,3 @@ linters-settings:
       - require-error
       - suite-dont-use-pkg
       - suite-extra-assert-call
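Note: taken together, the additions above steer code toward faster standard-library calls (perfsprint, usestdlibvars) and the klauspost/compress drop-in packages (depguard). A minimal sketch of the patterns these rules encourage; illustrative code, not part of this commit:

```go
package lintexamples

import (
	"bytes"
	"net/http"
	"strconv"

	// depguard now rejects the stdlib compress/gzip in favor of this
	// API-compatible drop-in replacement.
	"github.com/klauspost/compress/gzip"
)

func Encode(n int) ([]byte, error) {
	// perfsprint flags fmt.Sprintf("%d", n); strconv.Itoa is faster.
	id := strconv.Itoa(n)

	// usestdlibvars flags the literal "GET" in favor of http.MethodGet.
	if _, err := http.NewRequest(http.MethodGet, "https://example.com/"+id, nil); err != nil {
		return nil, err
	}

	// Same Writer API as the standard library's gzip.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte(id)); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```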

.promu.yml

@@ -1,7 +1,7 @@
 go:
     # Whenever the Go version is updated here,
-    # .circle/config.yml should also be updated.
-    version: 1.21
+    # .github/workflows should also be updated.
+    version: 1.22
 repository:
     path: github.com/prometheus/prometheus
 build:

.yamllint

@@ -1,5 +1,7 @@
 ---
 extends: default
+ignore: |
+  ui/react-app/node_modules

 rules:
   braces:

CHANGELOG.md

@@ -2,8 +2,164 @@

 ## unreleased

-* [ENHANCEMENT] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
-* [BUGFIX] Agent: Participate in notify calls. #13223
+* [CHANGE] Rules: Execute 1 query instead of N (where N is the number of alerts within alert rule) when restoring alerts. #13980
+* [ENHANCEMENT] Rules: Add `rule_group_last_restore_duration_seconds` to measure the time it takes to restore a rule group. #13974
+* [ENHANCEMENT] OTLP: Improve remote write format translation performance by using label set hashes for metric identifiers instead of string based ones. #14006 #13991
+* [ENHANCEMENT] TSDB: Optimize querying with regexp matchers. #13620
+* [BUGFIX] OTLP: Don't generate target_info unless at least one identifying label is defined. #13991
+* [BUGFIX] OTLP: Don't generate target_info unless there are metrics. #13991
+
+## 2.52.0-rc.1 / 2024-05-03
+
+* [BUGFIX] API: Fix missing comma during JSON encoding of API results. #14047
+
+## 2.52.0-rc.0 / 2024-04-22
+
+* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
+* [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
+* [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935
+* [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099
+* [FEATURE] Alerting: Support native histogram templating. #13731
+* [FEATURE] Linode SD: Support IPv6 range discovery and region filtering. #13774
+* [ENHANCEMENT] PromQL: Performance improvements for queries with regex matchers. #13461
+* [ENHANCEMENT] PromQL: Performance improvements when using aggregation operators. #13744
+* [ENHANCEMENT] PromQL: Validate label_join destination label. #13803
+* [ENHANCEMENT] Scrape: Increment `prometheus_target_scrapes_sample_duplicate_timestamp_total` metric on duplicated series during one scrape. #12933
+* [ENHANCEMENT] TSDB: Many improvements in performance. #13742 #13673 #13782
+* [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
+* [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
+* [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
+* [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
+* [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852
+* [BUGFIX] PromQL: Fix possible duplicated label name and values in a metric result for specific queries. #13845
+* [BUGFIX] Scrape: Fix setting native histogram schema factor during scrape. #13846
+* [BUGFIX] TSDB: Fix counting of histogram samples when creating WAL checkpoint stats. #13776
+* [BUGFIX] TSDB: Fix cases of compacting empty heads. #13755
+* [BUGFIX] TSDB: Count float histograms in WAL checkpoint. #13844
+* [BUGFIX] Remote Read: Fix memory leak due to broken requests. #13777
+* [BUGFIX] API: Stop building response for `/api/v1/series/` when the API request was cancelled. #13766
+* [BUGFIX] promtool: Fix panic on `promtool tsdb analyze --extended` when no native histograms are present. #13976
+
+## 2.51.2 / 2024-04-09
+
+Bugfix release.
+
+* [BUGFIX] Notifier: could hang when using relabeling on alerts #13861
+
+## 2.51.1 / 2024-03-27
+
+Bugfix release.
+
+* [BUGFIX] PromQL: Re-instate validation of label_join destination label #13803
+* [BUGFIX] Scraping (experimental native histograms): Fix handling of the min bucket factor on sync of targets #13846
+* [BUGFIX] PromQL: Some queries could return the same series twice (library use only) #13845
+
+## 2.51.0 / 2024-03-18
+
+This version is built with Go 1.22.1.
+
+There is a new optional build tag "dedupelabels", which should reduce memory consumption (#12304).
+It is off by default; there will be an optional alternative image to try it out.
+
+* [CHANGE] Scraping: Do experimental timestamp alignment even if tolerance is bigger than 1% of scrape interval #13624, #13737
+* [FEATURE] Alerting: Relabel rules for AlertManagerConfig; allows routing alerts to different alertmanagers #12551, #13735
+* [FEATURE] API: add limit param to series, label-names and label-values APIs #13396
+* [FEATURE] UI (experimental native histograms): Add native histogram chart to Table view #13658
+* [FEATURE] Promtool: Add a "tsdb dump-openmetrics" to dump in OpenMetrics format. #13194
+* [FEATURE] PromQL (experimental native histograms): Add histogram_avg function #13467
+* [ENHANCEMENT] Rules: Evaluate independent rules concurrently #12946, #13527
+* [ENHANCEMENT] Scraping (experimental native histograms): Support exemplars #13488
+* [ENHANCEMENT] Remote Write: Disable resharding during active retry backoffs #13562
+* [ENHANCEMENT] Observability: Add native histograms to latency/duration metrics #13681
+* [ENHANCEMENT] Observability: Add 'type' label to prometheus_tsdb_head_out_of_order_samples_appended_total #13607
+* [ENHANCEMENT] API: Faster generation of targets into JSON #13469, #13484
+* [ENHANCEMENT] Scraping, API: Use faster compression library #10782
+* [ENHANCEMENT] OpenTelemetry: Performance improvements in OTLP parsing #13627
+* [ENHANCEMENT] PromQL: Optimisations to reduce CPU and memory #13448, #13536
+* [BUGFIX] PromQL: Constrain extrapolation in rate() to half of sample interval #13725
+* [BUGFIX] Remote Write: Stop slowing down when a new WAL segment is created #13583, #13628
+* [BUGFIX] PromQL: Fix wrongly scoped range vectors with @ modifier #13559
+* [BUGFIX] Kubernetes SD: Pod status changes were not discovered by Endpoints service discovery #13337
+* [BUGFIX] Azure SD: Fix 'error: parameter virtualMachineScaleSetName cannot be empty' (#13702)
+* [BUGFIX] Remote Write: Fix signing for AWS sigv4 transport #13497
+* [BUGFIX] Observability: Exemplars emitted by Prometheus use "trace_id" not "traceID" #13589
+
+## 2.50.1 / 2024-02-26
+
+* [BUGFIX] API: Fix metadata API using wrong field names. #13633
+
+## 2.50.0 / 2024-02-22
+
+* [CHANGE] Remote Write: Error `storage.ErrTooOldSample` is now generating HTTP error 400 instead of HTTP error 500. #13335
+* [FEATURE] Remote Write: Drop old inmemory samples. Activated using the config entry `sample_age_limit`. #13002
+* [FEATURE] **Experimental**: Add support for ingesting zeros as created timestamps. (enabled under the feature-flag `created-timestamp-zero-ingestion`). #12733 #13279
+* [FEATURE] Promtool: Add `analyze` histograms command. #12331
+* [FEATURE] TSDB/compaction: Add a way to enable overlapping compaction. #13282 #13393 #13398
+* [FEATURE] Add automatic memory limit handling. Activated using the feature flag. `auto-gomemlimit` #13395
+* [ENHANCEMENT] Promtool: allow specifying multiple matchers in `promtool tsdb dump`. #13296
+* [ENHANCEMENT] PromQL: Restore more efficient version of `NewPossibleNonCounterInfo` annotation. #13022
+* [ENHANCEMENT] Kuma SD: Extend configuration to allow users to specify client ID. #13278
+* [ENHANCEMENT] PromQL: Use natural sort in `sort_by_label` and `sort_by_label_desc`. This is **experimental**. #13411
+* [ENHANCEMENT] Native Histograms: support `native_histogram_min_bucket_factor` in scrape_config. #13222
+* [ENHANCEMENT] Native Histograms: Issue warning if histogramRate is applied to the wrong kind of histogram. #13392
+* [ENHANCEMENT] TSDB: Make transaction isolation data structures smaller. #13015
+* [ENHANCEMENT] TSDB/postings: Optimize merge using Loser Tree. #12878
+* [ENHANCEMENT] TSDB: Simplify internal series delete function. #13261
+* [ENHANCEMENT] Agent: Performance improvement by making the global hash lookup table smaller. #13262
+* [ENHANCEMENT] PromQL: faster execution of metric functions, e.g. abs(), rate() #13446
+* [ENHANCEMENT] TSDB: Optimize label values with matchers by taking shortcuts. #13426
+* [ENHANCEMENT] Kubernetes SD: Check preconditions earlier and avoid unnecessary checks or iterations in kube_sd. #13408
+* [ENHANCEMENT] Promtool: Improve visibility for `promtool test rules` with JSON colored formatting. #13342
+* [ENHANCEMENT] Consoles: Exclude iowait and steal from CPU Utilisation. #9593
+* [ENHANCEMENT] Various improvements and optimizations on Native Histograms. #13267, #13215, #13276 #13289, #13340
+* [BUGFIX] Scraping: Fix quality value in HTTP Accept header. #13313
+* [BUGFIX] UI: Fix usage of the function `time()` that was crashing. #13371
+* [BUGFIX] Azure SD: Fix SD crashing when it finds a VM scale set. #13578
+
+## 2.49.1 / 2024-01-15
+
+* [BUGFIX] TSDB: Fixed a wrong `q=` value in scrape accept header #13313
+
+## 2.49.0 / 2024-01-15
+
+* [FEATURE] Promtool: Add `--run` flag promtool test rules command. #12206
+* [FEATURE] SD: Add support for `NS` records to DNS SD. #13219
+* [FEATURE] UI: Add heatmap visualization setting in the Graph tab, useful histograms. #13096 #13371
+* [FEATURE] Scraping: Add `scrape_config.enable_compression` (default true) to disable gzip compression when scraping the target. #13166
+* [FEATURE] PromQL: Add a `promql-experimental-functions` feature flag containing some new experimental PromQL functions. #13103 NOTE: More experimental functions might be added behind the same feature flag in the future. Added functions:
+  * Experimental `mad_over_time` (median absolute deviation around the median) function. #13059
+  * Experimental `sort_by_label` and `sort_by_label_desc` functions allowing sorting returned series by labels. #11299
+* [FEATURE] SD: Add `__meta_linode_gpus` label to Linode SD. #13097
+* [FEATURE] API: Add `exclude_alerts` query parameter to `/api/v1/rules` to only return recording rules. #12999
+* [FEATURE] TSDB: --storage.tsdb.retention.time flag value is now exposed as a `prometheus_tsdb_retention_limit_seconds` metric. #12986
+* [FEATURE] Scraping: Add ability to specify priority of scrape protocols to accept during scrape (e.g. to scrape Prometheus proto format for certain jobs). This can be changed by setting `global.scrape_protocols` and `scrape_config.scrape_protocols`. #12738
+* [ENHANCEMENT] Scraping: Automated handling of scraping histograms that violate `scrape_config.native_histogram_bucket_limit` setting. #13129
+* [ENHANCEMENT] Scraping: Optimized memory allocations when scraping. #12992
+* [ENHANCEMENT] SD: Added cache for Azure SD to avoid rate-limits. #12622
+* [ENHANCEMENT] TSDB: Various improvements to OOO exemplar scraping. E.g. allowing ingestion of exemplars with the same timestamp, but with different labels. #13021
+* [ENHANCEMENT] API: Optimize `/api/v1/labels` and `/api/v1/label/<label_name>/values` when 1 set of matchers are used. #12888
+* [ENHANCEMENT] TSDB: Various optimizations for TSDB block index, head mmap chunks and WAL, reducing latency and memory allocations (improving API calls, compaction queries etc). #12997 #13058 #13056 #13040
+* [ENHANCEMENT] PromQL: Optimize memory allocations and latency when querying float histograms. #12954
+* [ENHANCEMENT] Rules: Instrument TraceID in log lines for rule evaluations. #13034
+* [ENHANCEMENT] PromQL: Optimize memory allocations in query_range calls. #13043
+* [ENHANCEMENT] Promtool: unittest interval now defaults to evaluation_intervals when not set. #12729
+* [BUGFIX] SD: Fixed Azure SD public IP reporting #13241
+* [BUGFIX] API: Fix inaccuracies in posting cardinality statistics. #12653
+* [BUGFIX] PromQL: Fix inaccuracies of `histogram_quantile` with classic histograms. #13153
+* [BUGFIX] TSDB: Fix rare fails or inaccurate queries with OOO samples. #13115
+* [BUGFIX] TSDB: Fix rare panics on append commit when exemplars are used. #13092
+* [BUGFIX] TSDB: Fix exemplar WAL storage, so remote write can send/receive samples before exemplars. #13113
+* [BUGFIX] Mixins: Fix `url` filter on remote write dashboards. #10721
+* [BUGFIX] PromQL/TSDB: Various fixes to float histogram operations. #12891 #12977 #12609 #13190 #13189 #13191 #13201 #13212 #13208
+* [BUGFIX] Promtool: Fix int32 overflow issues for 32-bit architectures. #12978
+* [BUGFIX] SD: Fix Azure VM Scale Set NIC issue. #13283
+
+## 2.48.1 / 2023-12-07
+
+* [BUGFIX] TSDB: Make the wlog watcher read segments synchronously when not tailing. #13224
+* [BUGFIX] Agent: Participate in notify calls (fixes slow down in remote write handling introduced in 2.45). #13223
+
 ## 2.48.0 / 2023-11-16

CONTRIBUTING.md

@@ -95,7 +95,7 @@ can modify the `./promql/parser/generated_parser.y.go` manually.

 ```golang
 // As of writing this was somewhere around line 600.
 var (
-	yyDebug        = 0     // This can be be a number 0 -> 5.
+	yyDebug        = 0     // This can be a number 0 -> 5.
 	yyErrorVerbose = false // This can be set to true.
 )

MAINTAINERS.md

@@ -1,15 +1,23 @@
 # Maintainers

-Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:
+General maintainers:
+* Bryan Boreham (bjboreham@gmail.com / @bboreham)
+* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
+* Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
+* Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
+
+Maintainers for specific parts of the codebase:
 * `cmd`
   * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl)
 * `discovery`
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
   * `prometheus-mixin`: Matthias Loibl (<mail@matthiasloibl.com> / @metalmatze)
+* `model/histogram` and other code related to native histograms: Björn Rabenstein (<beorn@grafana.com> / @beorn7),
+  George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
-  * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+  * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
+  * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
   * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
@@ -22,3 +30,13 @@ size of this repository, the natural changes in focus of maintainers over time,
 and nuances of where particular features live, this list will always be
 incomplete and out of date. However the listed maintainer(s) should be able to
 direct a PR/question to the right person.
+
+v3 release coordinators:
+* Alex Greenbank (<alex.greenbank@grafana.com> / @alexgreenbank)
+* Carrie Edwards (<carrie.edwards@grafana.com> / @carrieedwards)
+* Fiona Liao (<fiona.liao@grafana.com> / @fionaliao)
+* Jan Fajerski (<github@fajerski.name> / @jan--f)
+* Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
+* Nico Pazos (<nicolas.pazos-mendez@grafana.com> / @npazosmendez)
+* Owen Williams (<owen.williams@grafana.com> / @ywwg)
+* Tom Braack (<me@shorez.de> / @sh0rez)

Makefile

@@ -24,6 +24,7 @@ TSDB_BENCHMARK_DATASET ?= ./tsdb/testdata/20kseries.json
 TSDB_BENCHMARK_OUTPUT_DIR ?= ./benchout

 GOLANGCI_LINT_OPTS ?= --timeout 4m
+GOYACC_VERSION ?= v0.6.0

 include Makefile.common
@@ -78,24 +79,42 @@ assets-tarball: assets
	@echo '>> packaging assets'
	scripts/package_assets.sh

+# We only want to generate the parser when there's changes to the grammar.
 .PHONY: parser
 parser:
	@echo ">> running goyacc to generate the .go file."
-ifeq (, $(shell command -v goyacc > /dev/null))
+ifeq (, $(shell command -v goyacc 2> /dev/null))
	@echo "goyacc not installed so skipping"
-	@echo "To install: go install golang.org/x/tools/cmd/goyacc@v0.6.0"
+	@echo "To install: \"go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)\" or run \"make install-goyacc\""
 else
-	goyacc -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+	$(MAKE) promql/parser/generated_parser.y.go
 endif

+promql/parser/generated_parser.y.go: promql/parser/generated_parser.y
+	@echo ">> running goyacc to generate the .go file."
+	@goyacc -l -o promql/parser/generated_parser.y.go promql/parser/generated_parser.y
+
+.PHONY: clean-parser
+clean-parser:
+	@echo ">> cleaning generated parser"
+	@rm -f promql/parser/generated_parser.y.go
+
+.PHONY: check-generated-parser
+check-generated-parser: clean-parser promql/parser/generated_parser.y.go
+	@echo ">> checking generated parser"
+	@git diff --exit-code -- promql/parser/generated_parser.y.go || (echo "Generated parser is out of date. Please run 'make parser' and commit the changes." && false)
+
+.PHONY: install-goyacc
+install-goyacc:
+	@echo ">> installing goyacc $(GOYACC_VERSION)"
+	@go install golang.org/x/tools/cmd/goyacc@$(GOYACC_VERSION)
+
 .PHONY: test
 # If we only want to only test go code we have to change the test target
 # which is called by all.
 ifeq ($(GO_ONLY),1)
 test: common-test check-go-mod-version
 else
-test: common-test ui-build-module ui-test ui-lint check-go-mod-version
+test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
 endif

 .PHONY: npm_licenses

Makefile.common

@@ -49,20 +49,20 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
	GOTEST_DIR := test-results
	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif

-PROMU_VERSION ?= 0.15.0
+PROMU_VERSION ?= 0.17.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.55.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64 and arm64.
+GOLANGCI_LINT_VERSION ?= v1.56.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
 ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
@@ -169,16 +169,20 @@ common-vet:
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
	@echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
-	$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif

+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint fix"
+	$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
 .PHONY: common-yamllint
 common-yamllint:
	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
	@echo "yamllint not installed so skipping"
 else
	yamllint .
@@ -204,6 +208,10 @@ common-tarball: promu
	@echo ">> building release tarball"
	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)

+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:

README.md

@@ -14,7 +14,7 @@ examples and guides.</p>
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/486/badge)](https://bestpractices.coreinfrastructure.org/projects/486)
 [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/prometheus/prometheus)
 [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/prometheus.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:prometheus)
-[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/prometheus/prometheus/badge)](https://securityscorecards.dev/viewer/?uri=github.com/prometheus/prometheus)

 </div>

@@ -149,7 +149,7 @@ We are publishing our Remote Write protobuf independently at
 You can use that as a library:

 ```shell
-go get go.buf.build/protocolbuffers/go/prometheus/prometheus
+go get buf.build/gen/go/prometheus/prometheus/protocolbuffers/go@latest
 ```

 This is experimental.

RELEASE.md

@@ -54,7 +54,10 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.47 | 2023-08-23 | Bryan Boreham (GitHub: @bboreham)       |
 | v2.48 | 2023-10-04 | Levi Harrison (GitHub: @LeviHarrison)   |
 | v2.49 | 2023-12-05 | Bartek Plotka (GitHub: @bwplotka)       |
-| v2.50 | 2024-01-16 | **searching for volunteer**             |
+| v2.50 | 2024-01-16 | Augustin Husson (GitHub: @nexucis)      |
+| v2.51 | 2024-03-07 | Bryan Boreham (GitHub: @bboreham)       |
+| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
+| v2.53 | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

VERSION

@@ -1 +1 @@
-2.48.0
+2.52.0-rc.1

cmd/prometheus/main.go

@@ -33,6 +33,7 @@ import (
	"syscall"
	"time"

+	"github.com/KimMachineGun/automemlimit/memlimit"
	"github.com/alecthomas/kingpin/v2"
	"github.com/alecthomas/units"
	"github.com/go-kit/log"
@@ -41,6 +42,7 @@ import (
	"github.com/mwitkow/go-conntrack"
	"github.com/oklog/run"
	"github.com/prometheus/client_golang/prometheus"
+	versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promlog"
	promlogflag "github.com/prometheus/common/promlog/flag"
@@ -98,7 +100,7 @@ var (
 )

 func init() {
-	prometheus.MustRegister(version.NewCollector(strings.ReplaceAll(appName, "-", "_")))
+	prometheus.MustRegister(versioncollector.NewCollector(strings.ReplaceAll(appName, "-", "_")))

	var err error
	defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString)
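Note: the init() hunk above replaces `version.NewCollector` with the build-info collector now shipped in client_golang. A minimal sketch of the new registration (assuming a client_golang version recent enough to include the `collectors/version` package):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
)

func init() {
	// Registers a <program>_build_info-style metric for the given program name.
	prometheus.MustRegister(versioncollector.NewCollector("my_program"))
}
```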
@@ -136,6 +138,7 @@ type flagConfig struct {
	forGracePeriod     model.Duration
	outageTolerance    model.Duration
	resendDelay        model.Duration
+	maxConcurrentEvals int64
	web                web.Options
	scrape             scrape.Options
	tsdb               tsdbOptions
@@ -147,13 +150,16 @@ type flagConfig struct {
	queryMaxSamples     int
	RemoteFlushDeadline model.Duration
	featureList         []string
+	memlimitRatio       float64
	// These options are extracted from featureList
	// for ease of use.
	enableExpandExternalLabels bool
	enableNewSDManager         bool
	enablePerStepStats         bool
	enableAutoGOMAXPROCS       bool
+	enableAutoGOMEMLIMIT       bool
+	enableConcurrentRuleEval   bool

	prometheusURL   string
	corsRegexString string
@@ -197,6 +203,12 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
		case "auto-gomaxprocs":
			c.enableAutoGOMAXPROCS = true
			level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+		case "auto-gomemlimit":
+			c.enableAutoGOMEMLIMIT = true
+			level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
+		case "concurrent-rule-eval":
+			c.enableConcurrentRuleEval = true
+			level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
		case "no-default-scrape-port":
			c.scrape.NoDefaultPort = true
			level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
@@ -205,6 +217,7 @@ func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
			level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
		case "native-histograms":
			c.tsdb.EnableNativeHistograms = true
+			c.scrape.EnableNativeHistogramsIngestion = true
			// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
			config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
			config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
@@ -262,6 +275,9 @@ func main() {
	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
		Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)

+	a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
+		Default("0.9").FloatVar(&cfg.memlimitRatio)
+
	webConfig := a.Flag(
		"web.config.file",
		"[EXPERIMENTAL] Path to configuration file that can enable TLS or authentication.",
@@ -402,6 +418,9 @@ func main() {
	serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
		Default("1m").SetValue(&cfg.resendDelay)

+	serverOnlyFlag(a, "rules.max-concurrent-evals", "Global concurrency limit for independent rules that can run concurrently. When set, \"query.max-concurrency\" may need to be adjusted accordingly.").
+		Default("4").Int64Var(&cfg.maxConcurrentEvals)
+
	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
		Hidden().Default("true").BoolVar(&scrape.AlignScrapeTimestamps)
@@ -429,7 +448,7 @@ func main() {
	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)

-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
		Default("").StringsVar(&cfg.featureList)

	promlogflag.AddFlags(a, &cfg.promlogConfig)
@ -467,6 +486,11 @@ func main() {
os.Exit(3) os.Exit(3)
} }
if cfg.memlimitRatio <= 0.0 || cfg.memlimitRatio > 1.0 {
fmt.Fprintln(os.Stderr, "--auto-gomemlimit.ratio must be greater than 0 and less than or equal to 1.")
os.Exit(1)
}
localStoragePath := cfg.serverStoragePath localStoragePath := cfg.serverStoragePath
if agentMode { if agentMode {
localStoragePath = cfg.agentStoragePath localStoragePath = cfg.agentStoragePath
@ -630,9 +654,16 @@ func main() {
level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err) level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
os.Exit(1) os.Exit(1)
} }
sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
if err != nil {
level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
os.Exit(1)
}
if cfg.enableNewSDManager { if cfg.enableNewSDManager {
{ {
discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, discovery.Name("scrape")) discMgr := discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
if discMgr == nil { if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape") level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1) os.Exit(1)
@ -641,7 +672,7 @@ func main() {
} }
{ {
discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, discovery.Name("notify")) discMgr := discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
if discMgr == nil { if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify") level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1) os.Exit(1)
@ -650,7 +681,7 @@ func main() {
} }
} else { } else {
{ {
discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, legacymanager.Name("scrape")) discMgr := legacymanager.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("scrape"))
if discMgr == nil { if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape") level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
os.Exit(1) os.Exit(1)
@ -659,7 +690,7 @@ func main() {
} }
{ {
discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, legacymanager.Name("notify")) discMgr := legacymanager.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, legacymanager.Name("notify"))
if discMgr == nil { if discMgr == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify") level.Error(logger).Log("msg", "failed to create a discovery manager notify")
os.Exit(1) os.Exit(1)
@ -695,6 +726,20 @@ func main() {
} }
} }
if cfg.enableAutoGOMEMLIMIT {
if _, err := memlimit.SetGoMemLimitWithOpts(
memlimit.WithRatio(cfg.memlimitRatio),
memlimit.WithProvider(
memlimit.ApplyFallback(
memlimit.FromCgroup,
memlimit.FromSystem,
),
),
); err != nil {
level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
}
}
if !agentMode { if !agentMode {
opts := promql.EngineOpts{ opts := promql.EngineOpts{
Logger: log.With(logger, "component", "query engine"), Logger: log.With(logger, "component", "query engine"),
@ -714,17 +759,19 @@ func main() {
queryEngine = promql.NewEngine(opts) queryEngine = promql.NewEngine(opts)
ruleManager = rules.NewManager(&rules.ManagerOptions{ ruleManager = rules.NewManager(&rules.ManagerOptions{
Appendable: fanoutStorage, Appendable: fanoutStorage,
Queryable: localStorage, Queryable: localStorage,
QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage), QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage),
NotifyFunc: rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()), NotifyFunc: rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
Context: ctxRule, Context: ctxRule,
ExternalURL: cfg.web.ExternalURL, ExternalURL: cfg.web.ExternalURL,
Registerer: prometheus.DefaultRegisterer, Registerer: prometheus.DefaultRegisterer,
Logger: log.With(logger, "component", "rule manager"), Logger: log.With(logger, "component", "rule manager"),
OutageTolerance: time.Duration(cfg.outageTolerance), OutageTolerance: time.Duration(cfg.outageTolerance),
ForGracePeriod: time.Duration(cfg.forGracePeriod), ForGracePeriod: time.Duration(cfg.forGracePeriod),
ResendDelay: time.Duration(cfg.resendDelay), ResendDelay: time.Duration(cfg.resendDelay),
MaxConcurrentEvals: cfg.maxConcurrentEvals,
ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
}) })
} }
@ -914,8 +961,8 @@ func main() {
func() error { func() error {
// Don't forget to release the reloadReady channel so that waiting blocks can exit normally. // Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
select { select {
case <-term: case sig := <-term:
level.Warn(logger).Log("msg", "Received SIGTERM, exiting gracefully...") level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String())
reloadReady.Close() reloadReady.Close()
case <-webHandler.Quit(): case <-webHandler.Quit():
level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...") level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
@ -1646,6 +1693,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown, EnableMemorySnapshotOnShutdown: opts.EnableMemorySnapshotOnShutdown,
EnableNativeHistograms: opts.EnableNativeHistograms, EnableNativeHistograms: opts.EnableNativeHistograms,
OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow, OutOfOrderTimeWindow: opts.OutOfOrderTimeWindow,
EnableOverlappingCompaction: true,
} }
} }

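For context on the auto-gomemlimit block above: a minimal, standalone sketch of the same call sequence, assuming the github.com/KimMachineGun/automemlimit/memlimit package the feature imports. The 0.9 ratio mirrors the flag default; debug.SetMemoryLimit(-1) only reads the current limit without changing it.

package main

import (
	"fmt"
	"runtime/debug"

	"github.com/KimMachineGun/automemlimit/memlimit"
)

func main() {
	// Set GOMEMLIMIT to 90% of the detected cgroup memory limit, falling
	// back to total system memory when no cgroup limit is found.
	limit, err := memlimit.SetGoMemLimitWithOpts(
		memlimit.WithRatio(0.9),
		memlimit.WithProvider(
			memlimit.ApplyFallback(
				memlimit.FromCgroup,
				memlimit.FromSystem,
			),
		),
	)
	if err != nil {
		fmt.Println("failed to set GOMEMLIMIT automatically:", err)
		return
	}
	fmt.Printf("GOMEMLIMIT=%d (runtime reports %d)\n", limit, debug.SetMemoryLimit(-1))
}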

@ -24,6 +24,7 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strconv"
"strings" "strings"
"syscall" "syscall"
"testing" "testing"
@ -126,12 +127,9 @@ func TestFailedStartupExitCode(t *testing.T) {
require.Error(t, err) require.Error(t, err)
var exitError *exec.ExitError var exitError *exec.ExitError
if errors.As(err, &exitError) { require.ErrorAs(t, err, &exitError)
status := exitError.Sys().(syscall.WaitStatus) status := exitError.Sys().(syscall.WaitStatus)
require.Equal(t, expectedExitStatus, status.ExitStatus()) require.Equal(t, expectedExitStatus, status.ExitStatus())
} else {
t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
}
} }
type senderFunc func(alerts ...*notifier.Alert) type senderFunc func(alerts ...*notifier.Alert)
@ -192,11 +190,9 @@ func TestSendAlerts(t *testing.T) {
for i, tc := range testCases { for i, tc := range testCases {
tc := tc tc := tc
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { t.Run(strconv.Itoa(i), func(t *testing.T) {
senderFunc := senderFunc(func(alerts ...*notifier.Alert) { senderFunc := senderFunc(func(alerts ...*notifier.Alert) {
if len(tc.in) == 0 { require.NotEmpty(t, tc.in, "sender called with 0 alert")
t.Fatalf("sender called with 0 alert")
}
require.Equal(t, tc.exp, alerts) require.Equal(t, tc.exp, alerts)
}) })
rules.SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...) rules.SendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...)
@ -228,7 +224,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }() go func() { done <- prom.Wait() }()
select { select {
case err := <-done: case err := <-done:
t.Errorf("prometheus should be still running: %v", err) require.Fail(t, "prometheus should be still running: %v", err)
case <-time.After(startupTime): case <-time.After(startupTime):
prom.Process.Kill() prom.Process.Kill()
<-done <-done
@ -239,12 +235,9 @@ func TestWALSegmentSizeBounds(t *testing.T) {
err = prom.Wait() err = prom.Wait()
require.Error(t, err) require.Error(t, err)
var exitError *exec.ExitError var exitError *exec.ExitError
if errors.As(err, &exitError) { require.ErrorAs(t, err, &exitError)
status := exitError.Sys().(syscall.WaitStatus) status := exitError.Sys().(syscall.WaitStatus)
require.Equal(t, expectedExitStatus, status.ExitStatus()) require.Equal(t, expectedExitStatus, status.ExitStatus())
} else {
t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
}
} }
} }
@ -274,7 +267,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
go func() { done <- prom.Wait() }() go func() { done <- prom.Wait() }()
select { select {
case err := <-done: case err := <-done:
t.Errorf("prometheus should be still running: %v", err) require.Fail(t, "prometheus should be still running: %v", err)
case <-time.After(startupTime): case <-time.After(startupTime):
prom.Process.Kill() prom.Process.Kill()
<-done <-done
@ -285,12 +278,9 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
err = prom.Wait() err = prom.Wait()
require.Error(t, err) require.Error(t, err)
var exitError *exec.ExitError var exitError *exec.ExitError
if errors.As(err, &exitError) { require.ErrorAs(t, err, &exitError)
status := exitError.Sys().(syscall.WaitStatus) status := exitError.Sys().(syscall.WaitStatus)
require.Equal(t, expectedExitStatus, status.ExitStatus()) require.Equal(t, expectedExitStatus, status.ExitStatus())
} else {
t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
}
} }
} }
@ -347,10 +337,8 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
} }
require.Len(t, g.GetMetric(), 1) require.Len(t, g.GetMetric(), 1)
if _, ok := res[m]; ok { _, ok := res[m]
t.Error("expected only one metric family for", m) require.False(t, ok, "expected only one metric family for", m)
t.FailNow()
}
res[m] = *g.GetMetric()[0].GetGauge().Value res[m] = *g.GetMetric()[0].GetGauge().Value
} }
} }

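The test-file changes above repeatedly collapse hand-rolled errors.As / t.Errorf branches into testify's require helpers, which abort the test at the failing assertion. A tiny illustrative test of the pattern; the timeoutError type is invented for the example:

package main

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return e.op + ": timeout" }

func TestErrorAs(t *testing.T) {
	err := fmt.Errorf("wrapped: %w", &timeoutError{op: "dial"})
	var te *timeoutError
	// Fails and halts the test if err's chain contains no *timeoutError,
	// replacing the old errors.As + else branch.
	require.ErrorAs(t, err, &te)
	require.Equal(t, "dial", te.op)
}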

@ -12,7 +12,6 @@
// limitations under the License. // limitations under the License.
// //
//go:build !windows //go:build !windows
// +build !windows
package main package main
@ -24,6 +23,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/util/testutil" "github.com/prometheus/prometheus/util/testutil"
) )
@ -38,9 +39,7 @@ func TestStartupInterrupt(t *testing.T) {
prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port) prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port)
err := prom.Start() err := prom.Start()
if err != nil { require.NoError(t, err)
t.Fatalf("execution error: %v", err)
}
done := make(chan error, 1) done := make(chan error, 1)
go func() { go func() {
@ -69,14 +68,11 @@ Loop:
time.Sleep(500 * time.Millisecond) time.Sleep(500 * time.Millisecond)
} }
if !startedOk { require.True(t, startedOk, "prometheus didn't start in the specified timeout")
t.Fatal("prometheus didn't start in the specified timeout") err = prom.Process.Kill()
} require.Error(t, err, "prometheus didn't shutdown gracefully after sending the Interrupt signal")
switch err := prom.Process.Kill(); { // TODO - find a better way to detect when the process didn't exit as expected!
case err == nil: if stoppedErr != nil {
t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal") require.EqualError(t, stoppedErr, "signal: interrupt", "prometheus exit")
case stoppedErr != nil && stoppedErr.Error() != "signal: interrupt":
// TODO: find a better way to detect when the process didn't exit as expected!
t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
} }
} }

370 cmd/promtool/analyze.go Normal file

@ -0,0 +1,370 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"strings"
"time"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
)
var (
errNotNativeHistogram = errors.New("not a native histogram")
errNotEnoughData      = errors.New("not enough data")
outputHeader = `Bucket stats for each histogram series over time
------------------------------------------------
First the min, avg, and max number of populated buckets, followed by the total
number of buckets (only if different from the max number of populated buckets,
which is typical for classic but not native histograms).`
outputFooter = `Aggregated bucket stats
-----------------------
Each line shows min/avg/max over the series above.`
)
type QueryAnalyzeConfig struct {
metricType string
duration time.Duration
time string
matchers []string
}
// run retrieves metrics that look like classic histograms (i.e. have _bucket
// suffixes) or native histograms, depending on the metricType flag.
func (c *QueryAnalyzeConfig) run(url *url.URL, roundtripper http.RoundTripper) error {
if c.metricType != "histogram" {
return fmt.Errorf("analyze type is %s, must be 'histogram'", c.metricType)
}
ctx := context.Background()
api, err := newAPI(url, roundtripper, nil)
if err != nil {
return err
}
var endTime time.Time
if c.time != "" {
endTime, err = parseTime(c.time)
if err != nil {
return fmt.Errorf("error parsing time '%s': %w", c.time, err)
}
} else {
endTime = time.Now()
}
return c.getStatsFromMetrics(ctx, api, endTime, os.Stdout, c.matchers)
}
func (c *QueryAnalyzeConfig) getStatsFromMetrics(ctx context.Context, api v1.API, endTime time.Time, out io.Writer, matchers []string) error {
fmt.Fprintf(out, "%s\n\n", outputHeader)
metastatsNative := newMetaStatistics()
metastatsClassic := newMetaStatistics()
for _, matcher := range matchers {
seriesSel := seriesSelector(matcher, c.duration)
matrix, err := querySamples(ctx, api, seriesSel, endTime)
if err != nil {
return err
}
matrices := make(map[string]model.Matrix)
for _, series := range matrix {
// We do not handle mixed types. If there are float values, we assume it is a
// classic histogram; otherwise we assume it is a native histogram. Series that
// turn out not to match the expected type, or have too little data, are skipped.
if len(series.Values) == 0 {
stats, err := calcNativeBucketStatistics(series)
if err != nil {
if errors.Is(err, errNotNativeHistogram) || errors.Is(err, errNotEnoughData) {
continue
}
return err
}
fmt.Fprintf(out, "- %s (native): %v\n", series.Metric, *stats)
metastatsNative.update(stats)
} else {
lbs := model.LabelSet(series.Metric).Clone()
if _, ok := lbs["le"]; !ok {
continue
}
metricName := string(lbs[labels.MetricName])
if !strings.HasSuffix(metricName, "_bucket") {
continue
}
delete(lbs, labels.MetricName)
delete(lbs, "le")
key := formatSeriesName(metricName, lbs)
matrices[key] = append(matrices[key], series)
}
}
for key, matrix := range matrices {
stats, err := calcClassicBucketStatistics(matrix)
if err != nil {
if errors.Is(err, errNotEnoughData) {
continue
}
return err
}
fmt.Fprintf(out, "- %s (classic): %v\n", key, *stats)
metastatsClassic.update(stats)
}
}
fmt.Fprintf(out, "\n%s\n", outputFooter)
if metastatsNative.Count() > 0 {
fmt.Fprintf(out, "\nNative %s\n", metastatsNative)
}
if metastatsClassic.Count() > 0 {
fmt.Fprintf(out, "\nClassic %s\n", metastatsClassic)
}
return nil
}
func seriesSelector(metricName string, duration time.Duration) string {
builder := strings.Builder{}
builder.WriteString(metricName)
builder.WriteRune('[')
builder.WriteString(duration.String())
builder.WriteRune(']')
return builder.String()
}
func formatSeriesName(metricName string, lbs model.LabelSet) string {
builder := strings.Builder{}
builder.WriteString(metricName)
builder.WriteString(lbs.String())
return builder.String()
}
func querySamples(ctx context.Context, api v1.API, query string, end time.Time) (model.Matrix, error) {
values, _, err := api.Query(ctx, query, end)
if err != nil {
return nil, err
}
matrix, ok := values.(model.Matrix)
if !ok {
return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
}
return matrix, nil
}
// minPop/avgPop/maxPop is for the number of populated (non-zero) buckets.
// total is the total number of buckets across all samples in the series,
// populated or not.
type statistics struct {
minPop, maxPop, total int
avgPop float64
}
func (s statistics) String() string {
if s.maxPop == s.total {
return fmt.Sprintf("%d/%.3f/%d", s.minPop, s.avgPop, s.maxPop)
}
return fmt.Sprintf("%d/%.3f/%d/%d", s.minPop, s.avgPop, s.maxPop, s.total)
}
func calcClassicBucketStatistics(matrix model.Matrix) (*statistics, error) {
numBuckets := len(matrix)
stats := &statistics{
minPop: math.MaxInt,
total: numBuckets,
}
if numBuckets == 0 || len(matrix[0].Values) < 2 {
return stats, errNotEnoughData
}
numSamples := len(matrix[0].Values)
sortMatrix(matrix)
totalPop := 0
for timeIdx := 0; timeIdx < numSamples; timeIdx++ {
curr, err := getBucketCountsAtTime(matrix, numBuckets, timeIdx)
if err != nil {
return stats, err
}
countPop := 0
for _, b := range curr {
if b != 0 {
countPop++
}
}
totalPop += countPop
if stats.minPop > countPop {
stats.minPop = countPop
}
if stats.maxPop < countPop {
stats.maxPop = countPop
}
}
stats.avgPop = float64(totalPop) / float64(numSamples)
return stats, nil
}
func sortMatrix(matrix model.Matrix) {
sort.SliceStable(matrix, func(i, j int) bool {
return getLe(matrix[i]) < getLe(matrix[j])
})
}
func getLe(series *model.SampleStream) float64 {
lbs := model.LabelSet(series.Metric)
le, _ := strconv.ParseFloat(string(lbs["le"]), 64)
return le
}
func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int, error) {
counts := make([]int, numBuckets)
if timeIdx >= len(matrix[0].Values) {
// Just return zeroes instead of erroring out so we can get partial results.
return counts, nil
}
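// Classic histogram buckets are cumulative and the matrix is sorted by le
// (see sortMatrix), so each bucket's own count is the difference between
// adjacent cumulative values.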
counts[0] = int(matrix[0].Values[timeIdx].Value)
for i, bucket := range matrix[1:] {
if timeIdx >= len(bucket.Values) {
// Just return zeroes instead of erroring out so we can get partial results.
return counts, nil
}
curr := bucket.Values[timeIdx]
prev := matrix[i].Values[timeIdx]
// Assume the results are nicely aligned.
if curr.Timestamp != prev.Timestamp {
return counts, fmt.Errorf("matrix result is not time aligned")
}
counts[i+1] = int(curr.Value - prev.Value)
}
return counts, nil
}
type bucketBounds struct {
boundaries int32
upper, lower float64
}
func makeBucketBounds(b *model.HistogramBucket) bucketBounds {
return bucketBounds{
boundaries: b.Boundaries,
upper: float64(b.Upper),
lower: float64(b.Lower),
}
}
func calcNativeBucketStatistics(series *model.SampleStream) (*statistics, error) {
stats := &statistics{
minPop: math.MaxInt,
}
overall := make(map[bucketBounds]struct{})
totalPop := 0
if len(series.Histograms) == 0 {
return nil, errNotNativeHistogram
}
if len(series.Histograms) == 1 {
return nil, errNotEnoughData
}
for _, histogram := range series.Histograms {
for _, bucket := range histogram.Histogram.Buckets {
bb := makeBucketBounds(bucket)
overall[bb] = struct{}{}
}
countPop := len(histogram.Histogram.Buckets)
totalPop += countPop
if stats.minPop > countPop {
stats.minPop = countPop
}
if stats.maxPop < countPop {
stats.maxPop = countPop
}
}
stats.avgPop = float64(totalPop) / float64(len(series.Histograms))
stats.total = len(overall)
return stats, nil
}
type distribution struct {
min, max, count int
avg float64
}
func newDistribution() distribution {
return distribution{
min: math.MaxInt,
}
}
func (d *distribution) update(num int) {
if d.min > num {
d.min = num
}
if d.max < num {
d.max = num
}
d.count++
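// Incremental mean: avg_new = avg_old + (num - avg_old) / count, rearranged
// so no separate running sum has to be stored.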
d.avg += float64(num)/float64(d.count) - d.avg/float64(d.count)
}
func (d distribution) String() string {
return fmt.Sprintf("%d/%.3f/%d", d.min, d.avg, d.max)
}
type metaStatistics struct {
minPop, avgPop, maxPop, total distribution
}
func newMetaStatistics() *metaStatistics {
return &metaStatistics{
minPop: newDistribution(),
avgPop: newDistribution(),
maxPop: newDistribution(),
total: newDistribution(),
}
}
func (ms metaStatistics) Count() int {
return ms.minPop.count
}
func (ms metaStatistics) String() string {
if ms.maxPop == ms.total {
return fmt.Sprintf("histogram series (%d in total):\n- min populated: %v\n- avg populated: %v\n- max populated: %v", ms.Count(), ms.minPop, ms.avgPop, ms.maxPop)
}
return fmt.Sprintf("histogram series (%d in total):\n- min populated: %v\n- avg populated: %v\n- max populated: %v\n- total: %v", ms.Count(), ms.minPop, ms.avgPop, ms.maxPop, ms.total)
}
func (ms *metaStatistics) update(s *statistics) {
ms.minPop.update(s.minPop)
ms.avgPop.update(int(s.avgPop))
ms.maxPop.update(s.maxPop)
ms.total.update(s.total)
}

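To make the heart of analyze.go concrete: the command is invoked as, e.g., promtool query analyze --server=http://localhost:9090 --type=histogram --match=my_histogram_bucket, and its central step is the cumulative-to-per-bucket conversion in getBucketCountsAtTime. A self-contained sketch of that step for a single timestamp (the numbers match the exampleMatrix fixture in the test file below):

package main

import "fmt"

func main() {
	// Cumulative counts for le="0.5", "2", "10", "+Inf" at one timestamp.
	cumulative := []int{10, 25, 30, 31}

	counts := make([]int, len(cumulative))
	counts[0] = cumulative[0]
	for i := 1; i < len(cumulative); i++ {
		// Subtracting the previous cumulative value recovers the count
		// that falls into this bucket alone.
		counts[i] = cumulative[i] - cumulative[i-1]
	}

	populated := 0
	for _, c := range counts {
		if c != 0 {
			populated++
		}
	}
	// Prints: per-bucket counts: [10 15 5 1], populated: 4 of 4
	fmt.Printf("per-bucket counts: %v, populated: %d of %d\n", counts, populated, len(counts))
}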

@ -0,0 +1,170 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/prometheus/common/model"
)
var (
exampleMatrix = model.Matrix{
&model.SampleStream{
Metric: model.Metric{
"le": "+Inf",
},
Values: []model.SamplePair{
{
Value: 31,
Timestamp: 100,
},
{
Value: 32,
Timestamp: 200,
},
{
Value: 40,
Timestamp: 300,
},
},
},
&model.SampleStream{
Metric: model.Metric{
"le": "0.5",
},
Values: []model.SamplePair{
{
Value: 10,
Timestamp: 100,
},
{
Value: 11,
Timestamp: 200,
},
{
Value: 11,
Timestamp: 300,
},
},
},
&model.SampleStream{
Metric: model.Metric{
"le": "10",
},
Values: []model.SamplePair{
{
Value: 30,
Timestamp: 100,
},
{
Value: 31,
Timestamp: 200,
},
{
Value: 37,
Timestamp: 300,
},
},
},
&model.SampleStream{
Metric: model.Metric{
"le": "2",
},
Values: []model.SamplePair{
{
Value: 25,
Timestamp: 100,
},
{
Value: 26,
Timestamp: 200,
},
{
Value: 27,
Timestamp: 300,
},
},
},
}
exampleMatrixLength = len(exampleMatrix)
)
func init() {
sortMatrix(exampleMatrix)
}
func TestGetBucketCountsAtTime(t *testing.T) {
cases := []struct {
matrix model.Matrix
length int
timeIdx int
expected []int
}{
{
exampleMatrix,
exampleMatrixLength,
0,
[]int{10, 15, 5, 1},
},
{
exampleMatrix,
exampleMatrixLength,
1,
[]int{11, 15, 5, 1},
},
{
exampleMatrix,
exampleMatrixLength,
2,
[]int{11, 16, 10, 3},
},
}
for _, c := range cases {
t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
require.NoError(t, err)
require.Equal(t, c.expected, res)
})
}
}
func TestCalcClassicBucketStatistics(t *testing.T) {
cases := []struct {
matrix model.Matrix
expected *statistics
}{
{
exampleMatrix,
&statistics{
minPop: 4,
avgPop: 4,
maxPop: 4,
total: 4,
},
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
res, err := calcClassicBucketStatistics(c.matrix)
require.NoError(t, err)
require.Equal(t, c.expected, res)
})
}
}


@ -15,9 +15,10 @@ package main
import ( import (
"archive/tar" "archive/tar"
"compress/gzip"
"fmt" "fmt"
"os" "os"
"github.com/klauspost/compress/gzip"
) )
const filePerm = 0o666 const filePerm = 0o666

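The hunk above swaps the standard library's compress/gzip for github.com/klauspost/compress/gzip, which is a drop-in, API-compatible replacement, so only the import changes. A minimal sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	// Same constructor and Writer API as the standard library's gzip.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, archive")); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	if err := zw.Close(); err != nil {
		fmt.Println("close failed:", err)
		return
	}
	fmt.Println("compressed size:", buf.Len())
}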

@ -88,7 +88,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
blockDuration := getCompatibleBlockDuration(maxBlockDuration) blockDuration := getCompatibleBlockDuration(maxBlockDuration)
mint = blockDuration * (mint / blockDuration) mint = blockDuration * (mint / blockDuration)
db, err := tsdb.OpenDBReadOnly(outputDir, nil) db, err := tsdb.OpenDBReadOnly(outputDir, "", nil)
if err != nil { if err != nil {
return err return err
} }
@ -127,7 +127,8 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
ctx := context.Background() ctx := context.Background()
app := w.Appender(ctx) app := w.Appender(ctx)
p := textparse.NewOpenMetricsParser(input) symbolTable := labels.NewSymbolTable() // One table per block means it won't grow too large.
p := textparse.NewOpenMetricsParser(input, symbolTable)
samplesCount := 0 samplesCount := 0
for { for {
e, err := p.Next() e, err := p.Next()
@ -216,7 +217,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
} }
func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) { func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
p := textparse.NewOpenMetricsParser(input) p := textparse.NewOpenMetricsParser(input, nil) // Don't need a SymbolTable to get max and min timestamps.
maxt, mint, err := getMinAndMaxTimestamps(p) maxt, mint, err := getMinAndMaxTimestamps(p)
if err != nil { if err != nil {
return fmt.Errorf("getting min and max timestamp: %w", err) return fmt.Errorf("getting min and max timestamp: %w", err)

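The backfill change above threads a labels.SymbolTable into the OpenMetrics parser so label strings are interned once per block. A rough sketch of a parse loop under that two-argument signature; the sample input follows the bare-sample format of the testdata files further down:

package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/textparse"
)

func main() {
	input := []byte("my_counter{baz=\"abc\",foo=\"bar\"} 1 0.000\n# EOF\n")
	// One SymbolTable per block bounds how much interned label data accumulates.
	st := labels.NewSymbolTable()
	p := textparse.NewOpenMetricsParser(input, st)
	for {
		e, err := p.Next()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		if e == textparse.EntrySeries {
			var lset labels.Labels
			p.Metric(&lset)
			fmt.Println("parsed series:", lset)
		}
	}
}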

@ -26,6 +26,7 @@ import (
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/util/testutil"
) )
type backfillSample struct { type backfillSample struct {
@ -76,7 +77,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
allSamples := queryAllSeries(t, q, expectedMinTime, expectedMaxTime) allSamples := queryAllSeries(t, q, expectedMinTime, expectedMaxTime)
sortSamples(allSamples) sortSamples(allSamples)
sortSamples(expectedSamples) sortSamples(expectedSamples)
require.Equal(t, expectedSamples, allSamples, "did not create correct samples") testutil.RequireEqual(t, expectedSamples, allSamples, "did not create correct samples")
if len(allSamples) > 0 { if len(allSamples) > 0 {
require.Equal(t, expectedMinTime, allSamples[0].Timestamp, "timestamp of first sample is not the expected minimum time") require.Equal(t, expectedMinTime, allSamples[0].Timestamp, "timestamp of first sample is not the expected minimum time")


@ -35,9 +35,7 @@ import (
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/google/pprof/profile" "github.com/google/pprof/profile"
"github.com/prometheus/client_golang/api" "github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/client_golang/prometheus/testutil/promlint" "github.com/prometheus/client_golang/prometheus/testutil/promlint"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -58,8 +56,8 @@ import (
"github.com/prometheus/prometheus/model/rulefmt" "github.com/prometheus/prometheus/model/rulefmt"
"github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/notifier"
_ "github.com/prometheus/prometheus/plugins" // Register plugins. _ "github.com/prometheus/prometheus/plugins" // Register plugins.
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/util/documentcli" "github.com/prometheus/prometheus/util/documentcli"
) )
@ -185,6 +183,14 @@ func main() {
queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String() queryLabelsEnd := queryLabelsCmd.Flag("end", "End time (RFC3339 or Unix timestamp).").String()
queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings() queryLabelsMatch := queryLabelsCmd.Flag("match", "Series selector. Can be specified multiple times.").Strings()
queryAnalyzeCfg := &QueryAnalyzeConfig{}
queryAnalyzeCmd := queryCmd.Command("analyze", "Run queries against your Prometheus to analyze the usage pattern of certain metrics.")
queryAnalyzeCmd.Flag("server", "Prometheus server to query.").Required().URLVar(&serverURL)
queryAnalyzeCmd.Flag("type", "Type of metric: histogram.").Required().StringVar(&queryAnalyzeCfg.metricType)
queryAnalyzeCmd.Flag("duration", "Time frame to analyze.").Default("1h").DurationVar(&queryAnalyzeCfg.duration)
queryAnalyzeCmd.Flag("time", "Query time (RFC3339 or Unix timestamp), defaults to now.").StringVar(&queryAnalyzeCfg.time)
queryAnalyzeCmd.Flag("match", "Series selector. Can be specified multiple times.").Required().StringsVar(&queryAnalyzeCfg.matchers)
pushCmd := app.Command("push", "Push to a Prometheus server.") pushCmd := app.Command("push", "Push to a Prometheus server.")
pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath) pushCmd.Flag("http.config.file", "HTTP client configuration file for promtool to connect to Prometheus.").PlaceHolder("<filename>").ExistingFileVar(&httpConfigFilePath)
pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).") pushMetricsCmd := pushCmd.Command("metrics", "Push metrics to a prometheus remote write (for testing purpose only).")
@ -204,6 +210,7 @@ func main() {
"test-rule-file", "test-rule-file",
"The unit test file.", "The unit test file.",
).Required().ExistingFiles() ).Required().ExistingFiles()
testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()
defaultDBPath := "data/" defaultDBPath := "data/"
tsdbCmd := app.Command("tsdb", "Run tsdb commands.") tsdbCmd := app.Command("tsdb", "Run tsdb commands.")
@ -228,9 +235,17 @@ func main() {
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.") tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String() dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64() dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64() dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector.").Default("{__name__=~'(?s:.*)'}").String() dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.") importCmd := tsdbCmd.Command("create-blocks-from", "[Experimental] Import samples from input and produce TSDB blocks. Please refer to the storage docs for more details.")
importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool() importHumanReadable := importCmd.Flag("human-readable", "Print human readable values.").Short('r').Bool()
@ -364,11 +379,12 @@ func main() {
case testRulesCmd.FullCommand(): case testRulesCmd.FullCommand():
os.Exit(RulesUnitTest( os.Exit(RulesUnitTest(
promql.LazyLoaderOpts{ promqltest.LazyLoaderOpts{
EnableAtModifier: true, EnableAtModifier: true,
EnableNegativeOffset: true, EnableNegativeOffset: true,
}, },
*testRulesRun, *testRulesRun,
*testRulesDiff,
*testRulesFiles...), *testRulesFiles...),
) )
@ -382,7 +398,9 @@ func main() {
os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable))) os.Exit(checkErr(listBlocks(*listPath, *listHumanReadable)))
case tsdbDumpCmd.FullCommand(): case tsdbDumpCmd.FullCommand():
os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpMinTime, *dumpMaxTime, *dumpMatch))) os.Exit(checkErr(dumpSamples(ctx, *dumpPath, *dumpSandboxDirRoot, *dumpMinTime, *dumpMaxTime, *dumpMatch, formatSeriesSet)))
case tsdbDumpOpenMetricsCmd.FullCommand():
os.Exit(checkErr(dumpSamples(ctx, *dumpOpenMetricsPath, *dumpOpenMetricsSandboxDirRoot, *dumpOpenMetricsMinTime, *dumpOpenMetricsMaxTime, *dumpOpenMetricsMatch, formatSeriesSetOpenMetrics)))
// TODO(aSquare14): Work on adding support for custom block size. // TODO(aSquare14): Work on adding support for custom block size.
case openMetricsImportCmd.FullCommand(): case openMetricsImportCmd.FullCommand():
os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration)) os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
@ -390,6 +408,9 @@ func main() {
case importRulesCmd.FullCommand(): case importRulesCmd.FullCommand():
os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...))) os.Exit(checkErr(importRules(serverURL, httpRoundTripper, *importRulesStart, *importRulesEnd, *importRulesOutputDir, *importRulesEvalInterval, *maxBlockDuration, *importRulesFiles...)))
case queryAnalyzeCmd.FullCommand():
os.Exit(checkErr(queryAnalyzeCfg.run(serverURL, httpRoundTripper)))
case documentationCmd.FullCommand(): case documentationCmd.FullCommand():
os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout))) os.Exit(checkErr(documentcli.GenerateMarkdown(app.Model(), os.Stdout)))
@ -463,7 +484,7 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
return err return err
} }
request, err := http.NewRequest("GET", config.Address, nil) request, err := http.NewRequest(http.MethodGet, config.Address, nil)
if err != nil { if err != nil {
return err return err
} }
@ -997,246 +1018,6 @@ func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
return stats, total, nil return stats, total, nil
} }
// QueryInstant performs an instant query against a Prometheus server.
func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
if url.Scheme == "" {
url.Scheme = "http"
}
config := api.Config{
Address: url.String(),
RoundTripper: roundTripper,
}
// Create new client.
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
eTime := time.Now()
if evalTime != "" {
eTime, err = parseTime(evalTime)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
return failureExitCode
}
}
// Run query against client.
api := v1.NewAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
cancel()
if err != nil {
return handleAPIError(err)
}
p.printValue(val)
return successExitCode
}
// QueryRange performs a range query against a Prometheus server.
func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
if url.Scheme == "" {
url.Scheme = "http"
}
config := api.Config{
Address: url.String(),
RoundTripper: roundTripper,
}
if len(headers) > 0 {
config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
for key, value := range headers {
req.Header.Add(key, value)
}
return roundTripper.RoundTrip(req)
})
}
// Create new client.
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
var stime, etime time.Time
if end == "" {
etime = time.Now()
} else {
etime, err = parseTime(end)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing end time:", err)
return failureExitCode
}
}
if start == "" {
stime = etime.Add(-5 * time.Minute)
} else {
stime, err = parseTime(start)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing start time:", err)
return failureExitCode
}
}
if !stime.Before(etime) {
fmt.Fprintln(os.Stderr, "start time is not before end time")
return failureExitCode
}
if step == 0 {
resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
// Convert seconds to nanoseconds such that time.Duration parses correctly.
step = time.Duration(resolution) * time.Second
}
// Run query against client.
api := v1.NewAPI(c)
r := v1.Range{Start: stime, End: etime, Step: step}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
cancel()
if err != nil {
return handleAPIError(err)
}
p.printValue(val)
return successExitCode
}
// QuerySeries queries for a series against a Prometheus server.
func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
if url.Scheme == "" {
url.Scheme = "http"
}
config := api.Config{
Address: url.String(),
RoundTripper: roundTripper,
}
// Create new client.
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
stime, etime, err := parseStartTimeAndEndTime(start, end)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return failureExitCode
}
// Run query against client.
api := v1.NewAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
cancel()
if err != nil {
return handleAPIError(err)
}
p.printSeries(val)
return successExitCode
}
// QueryLabels queries for label values against a Prometheus server.
func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
if url.Scheme == "" {
url.Scheme = "http"
}
config := api.Config{
Address: url.String(),
RoundTripper: roundTripper,
}
// Create new client.
c, err := api.NewClient(config)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
stime, etime, err := parseStartTimeAndEndTime(start, end)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return failureExitCode
}
// Run query against client.
api := v1.NewAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, warn, err := api.LabelValues(ctx, name, matchers, stime, etime)
cancel()
for _, v := range warn {
fmt.Fprintln(os.Stderr, "query warning:", v)
}
if err != nil {
return handleAPIError(err)
}
p.printLabelValues(val)
return successExitCode
}
func handleAPIError(err error) int {
var apiErr *v1.Error
if errors.As(err, &apiErr) && apiErr.Detail != "" {
fmt.Fprintf(os.Stderr, "query error: %v (detail: %s)\n", apiErr, strings.TrimSpace(apiErr.Detail))
} else {
fmt.Fprintln(os.Stderr, "query error:", err)
}
return failureExitCode
}
func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
var (
minTime = time.Now().Add(-9999 * time.Hour)
maxTime = time.Now().Add(9999 * time.Hour)
err error
)
stime := minTime
etime := maxTime
if start != "" {
stime, err = parseTime(start)
if err != nil {
return stime, etime, fmt.Errorf("error parsing start time: %w", err)
}
}
if end != "" {
etime, err = parseTime(end)
if err != nil {
return stime, etime, fmt.Errorf("error parsing end time: %w", err)
}
}
return stime, etime, nil
}
func parseTime(s string) (time.Time, error) {
if t, err := strconv.ParseFloat(s, 64); err == nil {
s, ns := math.Modf(t)
return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
}
if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
return t, nil
}
return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
}
type endpointsGroup struct { type endpointsGroup struct {
urlToFilename map[string]string urlToFilename map[string]string
postProcess func(b []byte) ([]byte, error) postProcess func(b []byte) ([]byte, error)
@ -1390,15 +1171,12 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
evalInterval: evalInterval, evalInterval: evalInterval,
maxBlockDuration: maxBlockDuration, maxBlockDuration: maxBlockDuration,
} }
client, err := api.NewClient(api.Config{ api, err := newAPI(url, roundTripper, nil)
Address: url.String(),
RoundTripper: roundTripper,
})
if err != nil { if err != nil {
return fmt.Errorf("new api client error: %w", err) return fmt.Errorf("new api client error: %w", err)
} }
ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, v1.NewAPI(client)) ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api)
errs := ruleImporter.loadGroups(ctx, files) errs := ruleImporter.loadGroups(ctx, files)
for _, err := range errs { for _, err := range errs {
if err != nil { if err != nil {


@ -25,6 +25,7 @@ import (
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strconv"
"strings" "strings"
"syscall" "syscall"
"testing" "testing"
@ -410,7 +411,7 @@ func TestExitCodes(t *testing.T) {
} { } {
t.Run(c.file, func(t *testing.T) { t.Run(c.file, func(t *testing.T) {
for _, lintFatal := range []bool{true, false} { for _, lintFatal := range []bool{true, false} {
t.Run(fmt.Sprintf("%t", lintFatal), func(t *testing.T) { t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
args := []string{"-test.main", "check", "config", "testdata/" + c.file} args := []string{"-test.main", "check", "config", "testdata/" + c.file}
if lintFatal { if lintFatal {
args = append(args, "--lint-fatal") args = append(args, "--lint-fatal")

251 cmd/promtool/query.go Normal file

@ -0,0 +1,251 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"errors"
"fmt"
"math"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/prometheus/client_golang/api"
v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
)
func newAPI(url *url.URL, roundTripper http.RoundTripper, headers map[string]string) (v1.API, error) {
if url.Scheme == "" {
url.Scheme = "http"
}
config := api.Config{
Address: url.String(),
RoundTripper: roundTripper,
}
if len(headers) > 0 {
config.RoundTripper = promhttp.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
for key, value := range headers {
req.Header.Add(key, value)
}
return roundTripper.RoundTrip(req)
})
}
// Create new client.
client, err := api.NewClient(config)
if err != nil {
return nil, err
}
api := v1.NewAPI(client)
return api, nil
}
// QueryInstant performs an instant query against a Prometheus server.
func QueryInstant(url *url.URL, roundTripper http.RoundTripper, query, evalTime string, p printer) int {
api, err := newAPI(url, roundTripper, nil)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
eTime := time.Now()
if evalTime != "" {
eTime, err = parseTime(evalTime)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
return failureExitCode
}
}
// Run query against client.
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.Query(ctx, query, eTime) // Ignoring warnings for now.
cancel()
if err != nil {
return handleAPIError(err)
}
p.printValue(val)
return successExitCode
}
// QueryRange performs a range query against a Prometheus server.
func QueryRange(url *url.URL, roundTripper http.RoundTripper, headers map[string]string, query, start, end string, step time.Duration, p printer) int {
api, err := newAPI(url, roundTripper, headers)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
var stime, etime time.Time
if end == "" {
etime = time.Now()
} else {
etime, err = parseTime(end)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing end time:", err)
return failureExitCode
}
}
if start == "" {
stime = etime.Add(-5 * time.Minute)
} else {
stime, err = parseTime(start)
if err != nil {
fmt.Fprintln(os.Stderr, "error parsing start time:", err)
return failureExitCode
}
}
if !stime.Before(etime) {
fmt.Fprintln(os.Stderr, "start time is not before end time")
return failureExitCode
}
if step == 0 {
resolution := math.Max(math.Floor(etime.Sub(stime).Seconds()/250), 1)
// resolution is a number of seconds; multiplying by time.Second expresses it as a time.Duration.
step = time.Duration(resolution) * time.Second
}
// Run query against client.
r := v1.Range{Start: stime, End: etime, Step: step}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.QueryRange(ctx, query, r) // Ignoring warnings for now.
cancel()
if err != nil {
return handleAPIError(err)
}
p.printValue(val)
return successExitCode
}
// QuerySeries queries for a series against a Prometheus server.
func QuerySeries(url *url.URL, roundTripper http.RoundTripper, matchers []string, start, end string, p printer) int {
api, err := newAPI(url, roundTripper, nil)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
stime, etime, err := parseStartTimeAndEndTime(start, end)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return failureExitCode
}
// Run query against client.
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, _, err := api.Series(ctx, matchers, stime, etime) // Ignoring warnings for now.
cancel()
if err != nil {
return handleAPIError(err)
}
p.printSeries(val)
return successExitCode
}
// QueryLabels queries for label values against a Prometheus server.
func QueryLabels(url *url.URL, roundTripper http.RoundTripper, matchers []string, name, start, end string, p printer) int {
api, err := newAPI(url, roundTripper, nil)
if err != nil {
fmt.Fprintln(os.Stderr, "error creating API client:", err)
return failureExitCode
}
stime, etime, err := parseStartTimeAndEndTime(start, end)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return failureExitCode
}
// Run query against client.
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
val, warn, err := api.LabelValues(ctx, name, matchers, stime, etime)
cancel()
for _, v := range warn {
fmt.Fprintln(os.Stderr, "query warning:", v)
}
if err != nil {
return handleAPIError(err)
}
p.printLabelValues(val)
return successExitCode
}
func handleAPIError(err error) int {
var apiErr *v1.Error
if errors.As(err, &apiErr) && apiErr.Detail != "" {
fmt.Fprintf(os.Stderr, "query error: %v (detail: %s)\n", apiErr, strings.TrimSpace(apiErr.Detail))
} else {
fmt.Fprintln(os.Stderr, "query error:", err)
}
return failureExitCode
}
func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
var (
minTime = time.Now().Add(-9999 * time.Hour)
maxTime = time.Now().Add(9999 * time.Hour)
err error
)
stime := minTime
etime := maxTime
if start != "" {
stime, err = parseTime(start)
if err != nil {
return stime, etime, fmt.Errorf("error parsing start time: %w", err)
}
}
if end != "" {
etime, err = parseTime(end)
if err != nil {
return stime, etime, fmt.Errorf("error parsing end time: %w", err)
}
}
return stime, etime, nil
}
func parseTime(s string) (time.Time, error) {
if t, err := strconv.ParseFloat(s, 64); err == nil {
s, ns := math.Modf(t)
return time.Unix(int64(s), int64(ns*float64(time.Second))).UTC(), nil
}
if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
return t, nil
}
return time.Time{}, fmt.Errorf("cannot parse %q to a valid timestamp", s)
}

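One pattern in newAPI above deserves a note: wrapping an http.RoundTripper so fixed headers are attached to every outgoing request. An isolated sketch of the same idea (the header name is an arbitrary example); it clones the request first, since RoundTrip implementations must not mutate their argument:

package main

import (
	"fmt"
	"net/http"
)

type headerRoundTripper struct {
	next    http.RoundTripper
	headers map[string]string
}

func (rt headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// Clone before mutating: RoundTrip must not modify the caller's request.
	req = req.Clone(req.Context())
	for k, v := range rt.headers {
		req.Header.Add(k, v)
	}
	return rt.next.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: headerRoundTripper{
		next:    http.DefaultTransport,
		headers: map[string]string{"X-Example-Header": "promtool"},
	}}
	resp, err := client.Get("http://localhost:9090/-/ready")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}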

@ -234,17 +234,3 @@ func (m *multipleAppender) flushAndCommit(ctx context.Context) error {
} }
return nil return nil
} }
func max(x, y int64) int64 {
if x > y {
return x
}
return y
}
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}

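The hunk above deletes the local max/min helpers with no replacement at the call sites. That works because Go 1.21 made min and max predeclared generic built-ins:

package main

import "fmt"

func main() {
	var x, y int64 = 3, 7
	// Predeclared since Go 1.21; no int64 helper functions needed.
	fmt.Println(min(x, y), max(x, y)) // 3 7
}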

@ -78,7 +78,6 @@ func TestBackfillRuleIntegration(t *testing.T) {
// Execute the test more than once to simulate running the rule importer twice with the same data. // Execute the test more than once to simulate running the rule importer twice with the same data.
// We expect duplicate blocks with the same series are created when run more than once. // We expect duplicate blocks with the same series are created when run more than once.
for i := 0; i < tt.runcount; i++ { for i := 0; i < tt.runcount; i++ {
ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration) ruleImporter, err := newTestRuleImporter(ctx, start, tmpDir, tt.samples, tt.maxBlockDuration)
require.NoError(t, err) require.NoError(t, err)
path1 := filepath.Join(tmpDir, "test.file") path1 := filepath.Join(tmpDir, "test.file")


@ -18,10 +18,10 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os" "os"
"reflect"
"time" "time"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
@ -78,12 +78,25 @@ func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefault
defer cancel() defer cancel()
for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs { for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Registerer: registerer}) reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
err := metrics.Register()
if err != nil {
fmt.Fprintln(os.Stderr, "Could not register service discovery metrics", err)
return failureExitCode
}
d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Metrics: metrics})
if err != nil { if err != nil {
fmt.Fprintln(os.Stderr, "Could not create new discoverer", err) fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
return failureExitCode return failureExitCode
} }
go d.Run(ctx, targetGroupChan) go func() {
d.Run(ctx, targetGroupChan)
metrics.Unregister()
refreshMetrics.Unregister()
}()
} }
var targetGroups []*targetgroup.Group var targetGroups []*targetgroup.Group
@ -140,7 +153,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
duplicateRes := false duplicateRes := false
for _, sdCheckRes := range sdCheckResults { for _, sdCheckRes := range sdCheckResults {
if reflect.DeepEqual(sdCheckRes, result) { if cmp.Equal(sdCheckRes, result, cmp.Comparer(labels.Equal)) {
duplicateRes = true duplicateRes = true
break break
} }

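The sd.go change above gives each discoverer a fresh registry, registers its metrics before the discoverer is created, and unregisters them only after Run returns. A stripped-down sketch of that lifecycle, with a plain gauge standing in for the SD metrics:

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	gauge := prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_running"})
	if err := reg.Register(gauge); err != nil {
		fmt.Println("could not register metric:", err)
		return
	}
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		defer close(done)
		gauge.Set(1)
		<-ctx.Done() // stand-in for d.Run(ctx, targetGroupChan)
		// Unregister only once the run loop is finished with the metric.
		reg.Unregister(gauge)
	}()
	cancel()
	<-done
	fmt.Println("metrics unregistered after run finished")
}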

@ -23,6 +23,7 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/util/testutil"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -69,5 +70,5 @@ func TestSDCheckResult(t *testing.T) {
}, },
} }
require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true)) testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
} }


@ -0,0 +1,15 @@
my_histogram_bucket{instance="localhost:8000",job="example2",le="+Inf"} 1.0267820369e+10 1700215884.373
my_histogram_bucket{instance="localhost:8000",job="example2",le="+Inf"} 1.026872507e+10 1700215889.373
my_histogram_bucket{instance="localhost:8000",job="example2",le="0.01"} 0 1700215884.373
my_histogram_bucket{instance="localhost:8000",job="example2",le="0.01"} 0 1700215889.373
my_histogram_count{instance="localhost:8000",job="example2"} 1.0267820369e+10 1700215884.373
my_histogram_count{instance="localhost:8000",job="example2"} 1.026872507e+10 1700215889.373
my_summary_count{instance="localhost:8000",job="example5"} 9.518161497e+09 1700211684.981
my_summary_count{instance="localhost:8000",job="example5"} 9.519048034e+09 1700211689.984
my_summary_sum{instance="localhost:8000",job="example5"} 5.2349889185e+10 1700211684.981
my_summary_sum{instance="localhost:8000",job="example5"} 5.2354761848e+10 1700211689.984
up{instance="localhost:8000",job="example2"} 1 1700226034.330
up{instance="localhost:8000",job="example2"} 1 1700226094.329
up{instance="localhost:8000",job="example3"} 1 1700210681.366
up{instance="localhost:8000",job="example3"} 1 1700210686.366
# EOF


@ -0,0 +1,11 @@
my_counter{baz="abc",foo="bar"} 1 0.000
my_counter{baz="abc",foo="bar"} 2 60.000
my_counter{baz="abc",foo="bar"} 3 120.000
my_counter{baz="abc",foo="bar"} 4 180.000
my_counter{baz="abc",foo="bar"} 5 240.000
my_gauge{abc="baz",bar="foo"} 9 0.000
my_gauge{abc="baz",bar="foo"} 8 60.000
my_gauge{abc="baz",bar="foo"} 0 120.000
my_gauge{abc="baz",bar="foo"} 4 180.000
my_gauge{abc="baz",bar="foo"} 7 240.000
# EOF

15 cmd/promtool/testdata/dump-test-1.prom vendored Normal file

@ -0,0 +1,15 @@
{__name__="heavy_metric", foo="bar"} 5 0
{__name__="heavy_metric", foo="bar"} 4 60000
{__name__="heavy_metric", foo="bar"} 3 120000
{__name__="heavy_metric", foo="bar"} 2 180000
{__name__="heavy_metric", foo="bar"} 1 240000
{__name__="heavy_metric", foo="foo"} 5 0
{__name__="heavy_metric", foo="foo"} 4 60000
{__name__="heavy_metric", foo="foo"} 3 120000
{__name__="heavy_metric", foo="foo"} 2 180000
{__name__="heavy_metric", foo="foo"} 1 240000
{__name__="metric", baz="abc", foo="bar"} 1 0
{__name__="metric", baz="abc", foo="bar"} 2 60000
{__name__="metric", baz="abc", foo="bar"} 3 120000
{__name__="metric", baz="abc", foo="bar"} 4 180000
{__name__="metric", baz="abc", foo="bar"} 5 240000

cmd/promtool/testdata/dump-test-2.prom

@ -0,0 +1,10 @@
{__name__="heavy_metric", foo="foo"} 5 0
{__name__="heavy_metric", foo="foo"} 4 60000
{__name__="heavy_metric", foo="foo"} 3 120000
{__name__="heavy_metric", foo="foo"} 2 180000
{__name__="heavy_metric", foo="foo"} 1 240000
{__name__="metric", baz="abc", foo="bar"} 1 0
{__name__="metric", baz="abc", foo="bar"} 2 60000
{__name__="metric", baz="abc", foo="bar"} 3 120000
{__name__="metric", baz="abc", foo="bar"} 4 180000
{__name__="metric", baz="abc", foo="bar"} 5 240000


@ -0,0 +1,2 @@
{__name__="metric", baz="abc", foo="bar"} 2 60000
{__name__="metric", baz="abc", foo="bar"} 3 120000


@ -15,6 +15,7 @@ package main
import ( import (
"bufio" "bufio"
"bytes"
"context" "context"
"errors" "errors"
"fmt" "fmt"
@ -23,6 +24,7 @@ import (
"path/filepath" "path/filepath"
"runtime" "runtime"
"runtime/pprof" "runtime/pprof"
"slices"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@ -31,7 +33,7 @@ import (
"github.com/alecthomas/units" "github.com/alecthomas/units"
"github.com/go-kit/log" "github.com/go-kit/log"
"golang.org/x/exp/slices" "go.uber.org/atomic"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
@ -148,8 +150,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
} }
func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) { func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (uint64, error) {
var mu sync.Mutex var total atomic.Uint64
var total uint64
for i := 0; i < scrapeCount; i += 100 { for i := 0; i < scrapeCount; i += 100 {
var wg sync.WaitGroup var wg sync.WaitGroup
@ -164,22 +165,21 @@ func (b *writeBenchmark) ingestScrapes(lbls []labels.Labels, scrapeCount int) (u
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done()
n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i)) n, err := b.ingestScrapesShard(batch, 100, int64(timeDelta*i))
if err != nil { if err != nil {
// exitWithError(err) // exitWithError(err)
fmt.Println(" err", err) fmt.Println(" err", err)
} }
mu.Lock() total.Add(n)
total += n
mu.Unlock()
wg.Done()
}() }()
} }
wg.Wait() wg.Wait()
} }
fmt.Println("ingestion completed") fmt.Println("ingestion completed")
return total, nil return total.Load(), nil
} }
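The hunk above replaces a mutex-guarded counter with go.uber.org/atomic and moves wg.Done() into a defer so it still runs if the shard ingestion panics. A minimal sketch of the same pattern in isolation (the counts are illustrative):

package main

import (
	"fmt"
	"sync"

	"go.uber.org/atomic"
)

func main() {
	var total atomic.Uint64
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done() // runs even on panic, unlike a trailing call
			total.Add(100)  // lock-free increment
		}()
	}
	wg.Wait()
	fmt.Println(total.Load()) // 800
}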
func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) { func (b *writeBenchmark) ingestScrapesShard(lbls []labels.Labels, scrapeCount int, baset int64) (uint64, error) {
@ -338,7 +338,7 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
} }
func listBlocks(path string, humanReadable bool) error { func listBlocks(path string, humanReadable bool) error {
db, err := tsdb.OpenDBReadOnly(path, nil) db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil { if err != nil {
return err return err
} }
@ -393,7 +393,7 @@ func getFormatedBytes(bytes int64, humanReadable bool) string {
} }
func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) { func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error) {
db, err := tsdb.OpenDBReadOnly(path, nil) db, err := tsdb.OpenDBReadOnly(path, "", nil)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -667,7 +667,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
it := fhchk.Iterator(nil) it := fhchk.Iterator(nil)
bucketCount := 0 bucketCount := 0
for it.Next() == chunkenc.ValFloatHistogram { for it.Next() == chunkenc.ValFloatHistogram {
_, f := it.AtFloatHistogram() _, f := it.AtFloatHistogram(nil)
bucketCount += len(f.PositiveBuckets) bucketCount += len(f.PositiveBuckets)
bucketCount += len(f.NegativeBuckets) bucketCount += len(f.NegativeBuckets)
} }
@ -682,7 +682,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
it := hchk.Iterator(nil) it := hchk.Iterator(nil)
bucketCount := 0 bucketCount := 0
for it.Next() == chunkenc.ValHistogram { for it.Next() == chunkenc.ValHistogram {
_, f := it.AtHistogram() _, f := it.AtHistogram(nil)
bucketCount += len(f.PositiveBuckets) bucketCount += len(f.PositiveBuckets)
bucketCount += len(f.NegativeBuckets) bucketCount += len(f.NegativeBuckets)
} }
@ -706,8 +706,10 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
return nil return nil
} }
func dumpSamples(ctx context.Context, path string, mint, maxt int64, match string) (err error) { type SeriesSetFormatter func(series storage.SeriesSet) error
db, err := tsdb.OpenDBReadOnly(path, nil)
func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) (err error) {
db, err := tsdb.OpenDBReadOnly(dbDir, sandboxDirRoot, nil)
if err != nil { if err != nil {
return err return err
} }
@ -720,31 +722,25 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match strin
} }
defer q.Close() defer q.Close()
matchers, err := parser.ParseMetricSelector(match) matcherSets, err := parser.ParseMetricSelectors(match)
if err != nil { if err != nil {
return err return err
} }
ss := q.Select(ctx, false, nil, matchers...)
for ss.Next() { var ss storage.SeriesSet
series := ss.At() if len(matcherSets) > 1 {
lbs := series.Labels() var sets []storage.SeriesSet
it := series.Iterator(nil) for _, mset := range matcherSets {
for it.Next() == chunkenc.ValFloat { sets = append(sets, q.Select(ctx, true, nil, mset...))
ts, val := it.At()
fmt.Printf("%s %g %d\n", lbs, val, ts)
}
for it.Next() == chunkenc.ValFloatHistogram {
ts, fh := it.AtFloatHistogram()
fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
}
for it.Next() == chunkenc.ValHistogram {
ts, h := it.AtHistogram()
fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
}
if it.Err() != nil {
return ss.Err()
} }
ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
} else {
ss = q.Select(ctx, false, nil, matcherSets[0]...)
}
err = formatter(ss)
if err != nil {
return err
} }
if ws := ss.Warnings(); len(ws) > 0 { if ws := ss.Warnings(); len(ws) > 0 {
@ -757,6 +753,68 @@ func dumpSamples(ctx context.Context, path string, mint, maxt int64, match strin
return nil return nil
} }
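Two details of the reworked dumpSamples are easy to miss: q.Select is asked for sorted series (sortSeries=true) only on the multi-selector path, because storage.NewMergeSeriesSet needs sorted inputs to merge and deduplicate overlapping selections, while the single-selector path keeps sortSeries=false to avoid the extra work. A hedged sketch of that flow, assuming a storage.Querier q and illustrative selectors:

func selectMerged(ctx context.Context, q storage.Querier, selectors []string) (storage.SeriesSet, error) {
	matcherSets, err := parser.ParseMetricSelectors(selectors)
	if err != nil {
		return nil, err
	}
	if len(matcherSets) == 1 {
		// Nothing to merge, so skip sorting.
		return q.Select(ctx, false, nil, matcherSets[0]...), nil
	}
	var sets []storage.SeriesSet
	for _, mset := range matcherSets {
		// sortSeries=true: the merge below requires sorted inputs.
		sets = append(sets, q.Select(ctx, true, nil, mset...))
	}
	return storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge), nil
}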
func formatSeriesSet(ss storage.SeriesSet) error {
for ss.Next() {
series := ss.At()
lbs := series.Labels()
it := series.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
ts, val := it.At()
fmt.Printf("%s %g %d\n", lbs, val, ts)
}
for it.Next() == chunkenc.ValFloatHistogram {
ts, fh := it.AtFloatHistogram(nil)
fmt.Printf("%s %s %d\n", lbs, fh.String(), ts)
}
for it.Next() == chunkenc.ValHistogram {
ts, h := it.AtHistogram(nil)
fmt.Printf("%s %s %d\n", lbs, h.String(), ts)
}
if it.Err() != nil {
return ss.Err()
}
}
return nil
}
// CondensedString is labels.Labels.String() without spaces after the commas.
func CondensedString(ls labels.Labels) string {
var b bytes.Buffer
b.WriteByte('{')
i := 0
ls.Range(func(l labels.Label) {
if i > 0 {
b.WriteByte(',')
}
b.WriteString(l.Name)
b.WriteByte('=')
b.WriteString(strconv.Quote(l.Value))
i++
})
b.WriteByte('}')
return b.String()
}
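A usage note on CondensedString: the OpenMetrics exposition format does not allow the space that the labels package's own String() prints after each comma, which appears to be the reason this helper exists. For example (outputs shown as comments):

// labels.FromStrings("baz", "abc", "foo", "bar").String()
//   => {baz="abc", foo="bar"}
// CondensedString(labels.FromStrings("baz", "abc", "foo", "bar"))
//   => {baz="abc",foo="bar"}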
func formatSeriesSetOpenMetrics(ss storage.SeriesSet) error {
for ss.Next() {
series := ss.At()
lbs := series.Labels()
metricName := lbs.Get(labels.MetricName)
lbs = lbs.DropMetricName()
it := series.Iterator(nil)
for it.Next() == chunkenc.ValFloat {
ts, val := it.At()
fmt.Printf("%s%s %g %.3f\n", metricName, CondensedString(lbs), val, float64(ts)/1000)
}
if it.Err() != nil {
return ss.Err()
}
}
fmt.Println("# EOF")
return nil
}
func checkErr(err error) int { func checkErr(err error) int {
if err != nil { if err != nil {
fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, err)
@ -780,6 +838,10 @@ func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxB
} }
func displayHistogram(dataType string, datas []int, total int) { func displayHistogram(dataType string, datas []int, total int) {
if len(datas) == 0 {
fmt.Printf("%s: N/A\n\n", dataType)
return
}
slices.Sort(datas) slices.Sort(datas)
start, end, step := generateBucket(datas[0], datas[len(datas)-1]) start, end, step := generateBucket(datas[0], datas[len(datas)-1])
sum := 0 sum := 0
@ -794,9 +856,9 @@ func displayHistogram(dataType string, datas []int, total int) {
} }
avg := sum / len(datas) avg := sum / len(datas)
fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1]) fmt.Printf("%s (min/avg/max): %d/%d/%d\n", dataType, datas[0], avg, datas[len(datas)-1])
maxLeftLen := strconv.Itoa(len(fmt.Sprintf("%d", end))) maxLeftLen := strconv.Itoa(len(strconv.Itoa(end)))
maxRightLen := strconv.Itoa(len(fmt.Sprintf("%d", end+step))) maxRightLen := strconv.Itoa(len(strconv.Itoa(end + step)))
maxCountLen := strconv.Itoa(len(fmt.Sprintf("%d", maxCount))) maxCountLen := strconv.Itoa(len(strconv.Itoa(maxCount)))
for bucket, count := range buckets { for bucket, count := range buckets {
percentage := 100.0 * count / total percentage := 100.0 * count / total
fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage)) fmt.Printf("[%"+maxLeftLen+"d, %"+maxRightLen+"d]: %"+maxCountLen+"d %s\n", bucket*step+start+1, (bucket+1)*step+start, count, strings.Repeat("#", percentage))


@ -14,9 +14,20 @@
package main package main
import ( import (
"bytes"
"context"
"io"
"math"
"os"
"runtime"
"strings"
"testing" "testing"
"time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/tsdb"
) )
func TestGenerateBucket(t *testing.T) { func TestGenerateBucket(t *testing.T) {
@ -41,3 +52,144 @@ func TestGenerateBucket(t *testing.T) {
require.Equal(t, tc.step, step) require.Equal(t, tc.step, step)
} }
} }
// getDumpedSamples dumps samples and returns them.
func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string {
t.Helper()
oldStdout := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
err := dumpSamples(
context.Background(),
path,
t.TempDir(),
mint,
maxt,
match,
formatter,
)
require.NoError(t, err)
w.Close()
os.Stdout = oldStdout
var buf bytes.Buffer
io.Copy(&buf, r)
return buf.String()
}
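getDumpedSamples uses the classic pipe-swap trick to capture what dumpSamples writes to stdout. A minimal standalone sketch of the pattern (captureStdout is a hypothetical helper, not part of this change; it uses only imports already present in this file):

func captureStdout(t *testing.T, f func()) string {
	t.Helper()
	old := os.Stdout
	r, w, err := os.Pipe()
	require.NoError(t, err)
	os.Stdout = w // fmt.Printf and friends now write into the pipe
	f()
	require.NoError(t, w.Close()) // close the writer so the reader sees EOF
	os.Stdout = old
	var buf bytes.Buffer
	_, err = io.Copy(&buf, r)
	require.NoError(t, err)
	return buf.String()
}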
func normalizeNewLine(b []byte) []byte {
if strings.Contains(runtime.GOOS, "windows") {
// We use "\n" while dumping on Windows as well.
return bytes.ReplaceAll(b, []byte("\r\n"), []byte("\n"))
}
return b
}
func TestTSDBDump(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
metric{foo="bar", baz="abc"} 1 2 3 4 5
heavy_metric{foo="bar"} 5 4 3 2 1
heavy_metric{foo="foo"} 5 4 3 2 1
`)
tests := []struct {
name string
mint int64
maxt int64
match []string
expectedDump string
}{
{
name: "default match",
mint: math.MinInt64,
maxt: math.MaxInt64,
match: []string{"{__name__=~'(?s:.*)'}"},
expectedDump: "testdata/dump-test-1.prom",
},
{
name: "same matcher twice",
mint: math.MinInt64,
maxt: math.MaxInt64,
match: []string{"{foo=~'.+'}", "{foo=~'.+'}"},
expectedDump: "testdata/dump-test-1.prom",
},
{
name: "no duplication",
mint: math.MinInt64,
maxt: math.MaxInt64,
match: []string{"{__name__=~'(?s:.*)'}", "{baz='abc'}"},
expectedDump: "testdata/dump-test-1.prom",
},
{
name: "well merged",
mint: math.MinInt64,
maxt: math.MaxInt64,
match: []string{"{__name__='heavy_metric'}", "{baz='abc'}"},
expectedDump: "testdata/dump-test-1.prom",
},
{
name: "multi matchers",
mint: math.MinInt64,
maxt: math.MaxInt64,
match: []string{"{__name__='heavy_metric',foo='foo'}", "{__name__='metric'}"},
expectedDump: "testdata/dump-test-2.prom",
},
{
name: "with reduced mint and maxt",
mint: int64(60000),
maxt: int64(120000),
match: []string{"{__name__='metric'}"},
expectedDump: "testdata/dump-test-3.prom",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet)
expectedMetrics, err := os.ReadFile(tt.expectedDump)
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
// Even though samples are not sorted when there is only one matcher, the order in the cases above should stay the same.
require.Equal(t, string(expectedMetrics), dumpedMetrics)
})
}
}
func TestTSDBDumpOpenMetrics(t *testing.T) {
storage := promqltest.LoadedStorage(t, `
load 1m
my_counter{foo="bar", baz="abc"} 1 2 3 4 5
my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
`)
expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom")
require.NoError(t, err)
expectedMetrics = normalizeNewLine(expectedMetrics)
dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
require.Equal(t, string(expectedMetrics), dumpedMetrics)
}
func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
initialMetrics, err := os.ReadFile("testdata/dump-openmetrics-roundtrip-test.prom")
require.NoError(t, err)
initialMetrics = normalizeNewLine(initialMetrics)
dbDir := t.TempDir()
// Import samples from OM format
err = backfill(5000, initialMetrics, dbDir, false, false, 2*time.Hour)
require.NoError(t, err)
db, err := tsdb.Open(dbDir, nil, nil, tsdb.DefaultOptions(), nil)
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, db.Close())
})
// Dump the blocks into OM format
dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
// Should get back the initial metrics.
require.Equal(t, string(initialMetrics), dumpedMetrics)
}


@ -15,18 +15,20 @@ package main
import ( import (
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log" "github.com/go-kit/log"
"github.com/google/go-cmp/cmp"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/nsf/jsondiff"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -34,13 +36,14 @@ import (
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/promql/promqltest"
"github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
) )
// RulesUnitTest does unit testing of rules based on the unit testing files provided. // RulesUnitTest does unit testing of rules based on the unit testing files provided.
// More info about the file format can be found in the docs. // More info about the file format can be found in the docs.
func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files ...string) int { func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
failed := false failed := false
var run *regexp.Regexp var run *regexp.Regexp
@ -49,7 +52,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files .
} }
for _, f := range files { for _, f := range files {
if errs := ruleUnitTest(f, queryOpts, run); errs != nil { if errs := ruleUnitTest(f, queryOpts, run, diffFlag); errs != nil {
fmt.Fprintln(os.Stderr, " FAILED:") fmt.Fprintln(os.Stderr, " FAILED:")
for _, e := range errs { for _, e := range errs {
fmt.Fprintln(os.Stderr, e.Error()) fmt.Fprintln(os.Stderr, e.Error())
@ -67,7 +70,7 @@ func RulesUnitTest(queryOpts promql.LazyLoaderOpts, runStrings []string, files .
return successExitCode return successExitCode
} }
func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.Regexp) []error { func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool) []error {
fmt.Println("Unit Testing: ", filename) fmt.Println("Unit Testing: ", filename)
b, err := os.ReadFile(filename) b, err := os.ReadFile(filename)
@ -109,7 +112,7 @@ func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts, run *regexp.
if t.Interval == 0 { if t.Interval == 0 {
t.Interval = unitTestInp.EvaluationInterval t.Interval = unitTestInp.EvaluationInterval
} }
ers := t.test(evalInterval, groupOrderMap, queryOpts, unitTestInp.RuleFiles...) ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
if ers != nil { if ers != nil {
errs = append(errs, ers...) errs = append(errs, ers...)
} }
@ -173,13 +176,18 @@ type testGroup struct {
} }
// test performs the unit tests. // test performs the unit tests.
func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promql.LazyLoaderOpts, ruleFiles ...string) []error { func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
// Setup testing suite. // Setup testing suite.
suite, err := promql.NewLazyLoader(nil, tg.seriesLoadingString(), queryOpts) suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
if err != nil { if err != nil {
return []error{err} return []error{err}
} }
defer suite.Close() defer func() {
err := suite.Close()
if err != nil {
outErr = append(outErr, err)
}
}()
suite.SubqueryInterval = evalInterval suite.SubqueryInterval = evalInterval
// Load the rule files. // Load the rule files.
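The deferred Close above works because the function signature now declares a named return value (outErr); the deferred func can still append to it after the body has decided what to return. A minimal sketch of the idiom with a hypothetical io.Closer:

func runWithResource(open func() (io.Closer, error)) (outErr []error) {
	c, err := open()
	if err != nil {
		return []error{err}
	}
	defer func() {
		// Appending to the named return surfaces Close errors that a
		// plain `defer c.Close()` would silently drop.
		if err := c.Close(); err != nil {
			outErr = append(outErr, err)
		}
	}()
	// ... use c ...
	return outErr
}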
@ -338,15 +346,51 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
sort.Sort(gotAlerts) sort.Sort(gotAlerts)
sort.Sort(expAlerts) sort.Sort(expAlerts)
if !reflect.DeepEqual(expAlerts, gotAlerts) { if !cmp.Equal(expAlerts, gotAlerts, cmp.Comparer(labels.Equal)) {
var testName string var testName string
if tg.TestGroupName != "" { if tg.TestGroupName != "" {
testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName) testName = fmt.Sprintf(" name: %s,\n", tg.TestGroupName)
} }
expString := indentLines(expAlerts.String(), " ") expString := indentLines(expAlerts.String(), " ")
gotString := indentLines(gotAlerts.String(), " ") gotString := indentLines(gotAlerts.String(), " ")
errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v", if diffFlag {
testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString)) // If empty, populates an empty value
if gotAlerts.Len() == 0 {
gotAlerts = append(gotAlerts, labelAndAnnotation{
Labels: labels.Labels{},
Annotations: labels.Labels{},
})
}
// If empty, populates an empty value
if expAlerts.Len() == 0 {
expAlerts = append(expAlerts, labelAndAnnotation{
Labels: labels.Labels{},
Annotations: labels.Labels{},
})
}
diffOpts := jsondiff.DefaultConsoleOptions()
expAlertsJSON, err := json.Marshal(expAlerts)
if err != nil {
errs = append(errs, fmt.Errorf("error marshaling expected %s alert: [%s]", tg.TestGroupName, err.Error()))
continue
}
gotAlertsJSON, err := json.Marshal(gotAlerts)
if err != nil {
errs = append(errs, fmt.Errorf("error marshaling received %s alert: [%s]", tg.TestGroupName, err.Error()))
continue
}
res, diff := jsondiff.Compare(expAlertsJSON, gotAlertsJSON, &diffOpts)
if res != jsondiff.FullMatch {
errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n diff: %v",
testName, testcase.Alertname, testcase.EvalTime.String(), indentLines(diff, " ")))
}
} else {
errs = append(errs, fmt.Errorf("%s alertname: %s, time: %s, \n exp:%v, \n got:%v",
testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
}
} }
} }
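The new diff path prints a structural JSON comparison instead of two full alert dumps. A small runnable sketch of the github.com/nsf/jsondiff API used above (the payloads are illustrative):

package main

import (
	"fmt"

	"github.com/nsf/jsondiff"
)

func main() {
	opts := jsondiff.DefaultConsoleOptions()
	exp := []byte(`[{"Labels":{"alertname":"Up"}}]`)
	got := []byte(`[{"Labels":{"alertname":"Down"}}]`)
	if res, diff := jsondiff.Compare(exp, got, &opts); res != jsondiff.FullMatch {
		fmt.Println(diff) // field-by-field difference, console-colored
	}
}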
@ -370,7 +414,7 @@ Outer:
gotSamples = append(gotSamples, parsedSample{ gotSamples = append(gotSamples, parsedSample{
Labels: s.Metric.Copy(), Labels: s.Metric.Copy(),
Value: s.F, Value: s.F,
Histogram: promql.HistogramTestExpression(s.H), Histogram: promqltest.HistogramTestExpression(s.H),
}) })
} }
@ -400,7 +444,7 @@ Outer:
expSamples = append(expSamples, parsedSample{ expSamples = append(expSamples, parsedSample{
Labels: lb, Labels: lb,
Value: s.Value, Value: s.Value,
Histogram: promql.HistogramTestExpression(hist), Histogram: promqltest.HistogramTestExpression(hist),
}) })
} }
@ -410,7 +454,7 @@ Outer:
sort.Slice(gotSamples, func(i, j int) bool { sort.Slice(gotSamples, func(i, j int) bool {
return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0 return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
}) })
if !reflect.DeepEqual(expSamples, gotSamples) { if !cmp.Equal(expSamples, gotSamples, cmp.Comparer(labels.Equal)) {
errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr, errs = append(errs, fmt.Errorf(" expr: %q, time: %s,\n exp: %v\n got: %v", testCase.Expr,
testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples))) testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
} }
@ -529,7 +573,7 @@ func (la labelsAndAnnotations) String() string {
} }
s := "[\n0:" + indentLines("\n"+la[0].String(), " ") s := "[\n0:" + indentLines("\n"+la[0].String(), " ")
for i, l := range la[1:] { for i, l := range la[1:] {
s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), " ") s += ",\n" + strconv.Itoa(i+1) + ":" + indentLines("\n"+l.String(), " ")
} }
s += "\n]" s += "\n]"


@ -16,7 +16,9 @@ package main
import ( import (
"testing" "testing"
"github.com/prometheus/prometheus/promql" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/promql/promqltest"
) )
func TestRulesUnitTest(t *testing.T) { func TestRulesUnitTest(t *testing.T) {
@ -26,7 +28,7 @@ func TestRulesUnitTest(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
args args args args
queryOpts promql.LazyLoaderOpts queryOpts promqltest.LazyLoaderOpts
want int want int
}{ }{
{ {
@ -90,7 +92,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{ args: args{
files: []string{"./testdata/at-modifier-test.yml"}, files: []string{"./testdata/at-modifier-test.yml"},
}, },
queryOpts: promql.LazyLoaderOpts{ queryOpts: promqltest.LazyLoaderOpts{
EnableAtModifier: true, EnableAtModifier: true,
}, },
want: 0, want: 0,
@ -107,7 +109,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{ args: args{
files: []string{"./testdata/negative-offset-test.yml"}, files: []string{"./testdata/negative-offset-test.yml"},
}, },
queryOpts: promql.LazyLoaderOpts{ queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true, EnableNegativeOffset: true,
}, },
want: 0, want: 0,
@ -117,7 +119,7 @@ func TestRulesUnitTest(t *testing.T) {
args: args{ args: args{
files: []string{"./testdata/no-test-group-interval.yml"}, files: []string{"./testdata/no-test-group-interval.yml"},
}, },
queryOpts: promql.LazyLoaderOpts{ queryOpts: promqltest.LazyLoaderOpts{
EnableNegativeOffset: true, EnableNegativeOffset: true,
}, },
want: 0, want: 0,
@ -125,7 +127,7 @@ func TestRulesUnitTest(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
if got := RulesUnitTest(tt.queryOpts, nil, tt.args.files...); got != tt.want { if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
} }
}) })
@ -140,7 +142,7 @@ func TestRulesUnitTestRun(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
args args args args
queryOpts promql.LazyLoaderOpts queryOpts promqltest.LazyLoaderOpts
want int want int
}{ }{
{ {
@ -178,9 +180,8 @@ func TestRulesUnitTestRun(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
if got := RulesUnitTest(tt.queryOpts, tt.args.run, tt.args.files...); got != tt.want { got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...)
t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want) require.Equal(t, tt.want, got)
}
}) })
} }
} }


@ -82,7 +82,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
return cfg, nil return cfg, nil
} }
b := labels.ScratchBuilder{} b := labels.NewScratchBuilder(0)
cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
newV := os.Expand(v.Value, func(s string) string { newV := os.Expand(v.Value, func(s string) string {
if s == "$" { if s == "$" {
@ -97,6 +97,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
if newV != v.Value { if newV != v.Value {
level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
} }
// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
b.Add(v.Name, newV) b.Add(v.Name, newV)
}) })
cfg.GlobalConfig.ExternalLabels = b.Labels() cfg.GlobalConfig.ExternalLabels = b.Labels()
@ -610,9 +611,12 @@ type ScrapeConfig struct {
// More than this label value length post metric-relabeling will cause the // More than this label value length post metric-relabeling will cause the
// scrape to fail. 0 means no limit. // scrape to fail. 0 means no limit.
LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"` LabelValueLengthLimit uint `yaml:"label_value_length_limit,omitempty"`
// More than this many buckets in a native histogram will cause the scrape to // If there are more than this many buckets in a native histogram,
// fail. // buckets will be merged to stay within the limit.
NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"` NativeHistogramBucketLimit uint `yaml:"native_histogram_bucket_limit,omitempty"`
// If the growth factor of one bucket to the next is smaller than this,
// buckets will be merged to increase the factor sufficiently.
NativeHistogramMinBucketFactor float64 `yaml:"native_histogram_min_bucket_factor,omitempty"`
// Keep no more than this many dropped targets per job. // Keep no more than this many dropped targets per job.
// 0 means no limit. // 0 means no limit.
KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"`
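For intuition on the new native_histogram_min_bucket_factor field (a worked example, not part of this change): native histogram schema n uses a per-bucket growth factor of 2^(2^-n), roughly 1.09 at schema 3 and 1.19 at schema 2, so setting the minimum factor to 1.1 would make a scrape reduce a schema-3 histogram to schema 2 by merging adjacent buckets.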
@ -937,6 +941,8 @@ type AlertmanagerConfig struct {
// List of Alertmanager relabel configurations. // List of Alertmanager relabel configurations.
RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"`
// Relabel alerts before sending to the specific alertmanager.
AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"`
} }
// SetDirectory joins any relative file paths with dir. // SetDirectory joins any relative file paths with dir.
@ -979,6 +985,12 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
} }
} }
for _, rlcfg := range c.AlertRelabelConfigs {
if rlcfg == nil {
return errors.New("empty or null Alertmanager alert relabeling rule")
}
}
return nil return nil
} }
@ -1124,6 +1136,9 @@ type QueueConfig struct {
MinBackoff model.Duration `yaml:"min_backoff,omitempty"` MinBackoff model.Duration `yaml:"min_backoff,omitempty"`
MaxBackoff model.Duration `yaml:"max_backoff,omitempty"` MaxBackoff model.Duration `yaml:"max_backoff,omitempty"`
RetryOnRateLimit bool `yaml:"retry_on_http_429,omitempty"` RetryOnRateLimit bool `yaml:"retry_on_http_429,omitempty"`
// Samples older than the limit will be dropped.
SampleAgeLimit model.Duration `yaml:"sample_age_limit,omitempty"`
} }
// MetadataConfig is the configuration for sending metadata to remote // MetadataConfig is the configuration for sending metadata to remote


@ -12,7 +12,6 @@
// limitations under the License. // limitations under the License.
//go:build !windows //go:build !windows
// +build !windows
package config package config


@ -58,6 +58,7 @@ import (
"github.com/prometheus/prometheus/discovery/zookeeper" "github.com/prometheus/prometheus/discovery/zookeeper"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/model/relabel"
"github.com/prometheus/prometheus/util/testutil"
) )
func mustParseURL(u string) *config.URL { func mustParseURL(u string) *config.URL {
@ -568,6 +569,7 @@ var expectedConf = &Config{
ServiceDiscoveryConfigs: discovery.Configs{ ServiceDiscoveryConfigs: discovery.Configs{
&xds.KumaSDConfig{ &xds.KumaSDConfig{
Server: "http://kuma-control-plane.kuma-system.svc:5676", Server: "http://kuma-control-plane.kuma-system.svc:5676",
ClientID: "main-prometheus",
HTTPClientConfig: config.DefaultHTTPClientConfig, HTTPClientConfig: config.DefaultHTTPClientConfig,
RefreshInterval: model.Duration(15 * time.Second), RefreshInterval: model.Duration(15 * time.Second),
FetchTimeout: model.Duration(2 * time.Minute), FetchTimeout: model.Duration(2 * time.Minute),
@ -1838,7 +1840,7 @@ var expectedErrors = []struct {
}, },
{ {
filename: "azure_authentication_method.bad.yml", filename: "azure_authentication_method.bad.yml",
errMsg: "unknown authentication_type \"invalid\". Supported types are \"OAuth\" or \"ManagedIdentity\"", errMsg: "unknown authentication_type \"invalid\". Supported types are \"OAuth\", \"ManagedIdentity\" or \"SDK\"",
}, },
{ {
filename: "azure_bearertoken_basicauth.bad.yml", filename: "azure_bearertoken_basicauth.bad.yml",
@ -2036,16 +2038,16 @@ func TestExpandExternalLabels(t *testing.T) {
c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger()) c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
os.Setenv("TEST", "TestValue") os.Setenv("TEST", "TestValue")
c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
} }
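As with the cmp.Equal change in the sd-check code, testutil.RequireEqual is, to my reading, a require.Equal-style assertion that compares labels.Labels via labels.Equal, so these checks hold under both the slice-backed and stringlabels builds.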
func TestAgentMode(t *testing.T) { func TestAgentMode(t *testing.T) {


@ -221,6 +221,7 @@ scrape_configs:
kuma_sd_configs: kuma_sd_configs:
- server: http://kuma-control-plane.kuma-system.svc:5676 - server: http://kuma-control-plane.kuma-system.svc:5676
client_id: main-prometheus
- job_name: service-marathon - job_name: service-marathon
marathon_sd_configs: marathon_sd_configs:


@ -108,6 +108,7 @@ scrape_configs:
kuma_sd_configs: kuma_sd_configs:
- server: http://kuma-control-plane.kuma-system.svc:5676 - server: http://kuma-control-plane.kuma-system.svc:5676
client_id: main-prometheus
marathon_sd_configs: marathon_sd_configs:
- servers: - servers:


@ -47,7 +47,7 @@
<script> <script>
new PromConsole.Graph({ new PromConsole.Graph({
node: document.querySelector("#cpuGraph"), node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))", expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area', renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}}, max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix, yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,


@ -69,7 +69,7 @@
<script> <script>
new PromConsole.Graph({ new PromConsole.Graph({
node: document.querySelector("#cpuGraph"), node: document.querySelector("#cpuGraph"),
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))", expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
renderer: 'area', renderer: 'area',
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}}, max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix, yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,


@ -21,7 +21,7 @@
<tr> <tr>
<td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td> <td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
<td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td> <td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance)(irate(node_cpu_seconds_total{job='node',mode='idle',instance='%s'}[5m])))" .Labels.instance) "%" "printf.1f") }}</td> <td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td> <td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
</tr> </tr>
{{ else }} {{ else }}


@ -18,6 +18,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net" "net"
"strconv"
"strings" "strings"
"time" "time"
@ -97,12 +98,19 @@ type EC2SDConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
} }
// NewDiscovererMetrics implements discovery.Config.
func (*EC2SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &ec2Metrics{
refreshMetrics: rmi,
}
}
// Name returns the name of the EC2 Config. // Name returns the name of the EC2 Config.
func (*EC2SDConfig) Name() string { return "ec2" } func (*EC2SDConfig) Name() string { return "ec2" }
// NewDiscoverer returns a Discoverer for the EC2 Config. // NewDiscoverer returns a Discoverer for the EC2 Config.
func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { func (c *EC2SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewEC2Discovery(c, opts.Logger, opts.Registerer), nil return NewEC2Discovery(c, opts.Logger, opts.Metrics)
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config. // UnmarshalYAML implements the yaml.Unmarshaler interface for the EC2 Config.
@ -148,7 +156,12 @@ type EC2Discovery struct {
} }
// NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, reg prometheus.Registerer) *EC2Discovery { func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
m, ok := metrics.(*ec2Metrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
}
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
@ -158,14 +171,14 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, reg prometheus.Regist
} }
d.Discovery = refresh.NewDiscovery( d.Discovery = refresh.NewDiscovery(
refresh.Options{ refresh.Options{
Logger: logger, Logger: logger,
Mech: "ec2", Mech: "ec2",
Interval: time.Duration(d.cfg.RefreshInterval), Interval: time.Duration(d.cfg.RefreshInterval),
RefreshF: d.refresh, RefreshF: d.refresh,
Registry: reg, MetricsInstantiator: m.refreshMetrics,
}, },
) )
return d return d, nil
} }
func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) { func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
@ -267,7 +280,7 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
if inst.PrivateDnsName != nil { if inst.PrivateDnsName != nil {
labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName) labels[ec2LabelPrivateDNS] = model.LabelValue(*inst.PrivateDnsName)
} }
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port)) addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr) labels[model.AddressLabel] = model.LabelValue(addr)
if inst.Platform != nil { if inst.Platform != nil {


@ -18,6 +18,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net" "net"
"strconv"
"strings" "strings"
"time" "time"
@ -80,12 +81,19 @@ type LightsailSDConfig struct {
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"` HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
} }
// NewDiscovererMetrics implements discovery.Config.
func (*LightsailSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &lightsailMetrics{
refreshMetrics: rmi,
}
}
// Name returns the name of the Lightsail Config. // Name returns the name of the Lightsail Config.
func (*LightsailSDConfig) Name() string { return "lightsail" } func (*LightsailSDConfig) Name() string { return "lightsail" }
// NewDiscoverer returns a Discoverer for the Lightsail Config. // NewDiscoverer returns a Discoverer for the Lightsail Config.
func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { func (c *LightsailSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewLightsailDiscovery(c, opts.Logger, opts.Registerer), nil return NewLightsailDiscovery(c, opts.Logger, opts.Metrics)
} }
// UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config. // UnmarshalYAML implements the yaml.Unmarshaler interface for the Lightsail Config.
@ -122,23 +130,29 @@ type LightsailDiscovery struct {
} }
// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, reg prometheus.Registerer) *LightsailDiscovery { func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
m, ok := metrics.(*lightsailMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
}
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
d := &LightsailDiscovery{ d := &LightsailDiscovery{
cfg: conf, cfg: conf,
} }
d.Discovery = refresh.NewDiscovery( d.Discovery = refresh.NewDiscovery(
refresh.Options{ refresh.Options{
Logger: logger, Logger: logger,
Mech: "lightsail", Mech: "lightsail",
Interval: time.Duration(d.cfg.RefreshInterval), Interval: time.Duration(d.cfg.RefreshInterval),
RefreshF: d.refresh, RefreshF: d.refresh,
Registry: reg, MetricsInstantiator: m.refreshMetrics,
}, },
) )
return d return d, nil
} }
func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) { func (d *LightsailDiscovery) lightsailClient() (*lightsail.Lightsail, error) {
@ -216,7 +230,7 @@ func (d *LightsailDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
lightsailLabelRegion: model.LabelValue(d.cfg.Region), lightsailLabelRegion: model.LabelValue(d.cfg.Region),
} }
addr := net.JoinHostPort(*inst.PrivateIpAddress, fmt.Sprintf("%d", d.cfg.Port)) addr := net.JoinHostPort(*inst.PrivateIpAddress, strconv.Itoa(d.cfg.Port))
labels[model.AddressLabel] = model.LabelValue(addr) labels[model.AddressLabel] = model.LabelValue(addr)
if inst.PublicIpAddress != nil { if inst.PublicIpAddress != nil {


@ -0,0 +1,32 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"github.com/prometheus/prometheus/discovery"
)
type ec2Metrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
}
var _ discovery.DiscovererMetrics = (*ec2Metrics)(nil)
// Register implements discovery.DiscovererMetrics.
func (m *ec2Metrics) Register() error {
return nil
}
// Unregister implements discovery.DiscovererMetrics.
func (m *ec2Metrics) Unregister() {}


@ -0,0 +1,32 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"github.com/prometheus/prometheus/discovery"
)
type lightsailMetrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
}
var _ discovery.DiscovererMetrics = (*lightsailMetrics)(nil)
// Register implements discovery.DiscovererMetrics.
func (m *lightsailMetrics) Register() error {
return nil
}
// Unregister implements discovery.DiscovererMetrics.
func (m *lightsailMetrics) Unregister() {}


@ -20,6 +20,7 @@ import (
"math/rand" "math/rand"
"net" "net"
"net/http" "net/http"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -30,8 +31,8 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
cache "github.com/Code-Hex/go-generics-cache" cache "github.com/Code-Hex/go-generics-cache"
"github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/Code-Hex/go-generics-cache/policy/lru"
"github.com/go-kit/log" "github.com/go-kit/log"
@ -65,6 +66,7 @@ const (
azureLabelMachineSize = azureLabel + "machine_size" azureLabelMachineSize = azureLabel + "machine_size"
authMethodOAuth = "OAuth" authMethodOAuth = "OAuth"
authMethodSDK = "SDK"
authMethodManagedIdentity = "ManagedIdentity" authMethodManagedIdentity = "ManagedIdentity"
) )
@ -120,12 +122,17 @@ type SDConfig struct {
HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"`
} }
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return newDiscovererMetrics(reg, rmi)
}
// Name returns the name of the Config. // Name returns the name of the Config.
func (*SDConfig) Name() string { return "azure" } func (*SDConfig) Name() string { return "azure" }
// NewDiscoverer returns a Discoverer for the Config. // NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
return NewDiscovery(c, opts.Logger, opts.Registerer) return NewDiscovery(c, opts.Logger, opts.Metrics)
} }
func validateAuthParam(param, name string) error { func validateAuthParam(param, name string) error {
@ -159,8 +166,8 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
} }
} }
if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity { if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity && c.AuthenticationMethod != authMethodSDK {
return fmt.Errorf("unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity) return fmt.Errorf("unknown authentication_type %q. Supported types are %q, %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity, authMethodSDK)
} }
return c.HTTPClientConfig.Validate() return c.HTTPClientConfig.Validate()
@ -168,51 +175,53 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type Discovery struct { type Discovery struct {
*refresh.Discovery *refresh.Discovery
logger log.Logger logger log.Logger
cfg *SDConfig cfg *SDConfig
port int port int
cache *cache.Cache[string, *armnetwork.Interface] cache *cache.Cache[string, *armnetwork.Interface]
failuresCount prometheus.Counter metrics *azureMetrics
cacheHitCount prometheus.Counter
} }
// NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
func NewDiscovery(cfg *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) { func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*azureMetrics)
if !ok {
return nil, fmt.Errorf("invalid discovery metrics type")
}
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
d := &Discovery{ d := &Discovery{
cfg: cfg, cfg: cfg,
port: cfg.Port, port: cfg.Port,
logger: logger, logger: logger,
cache: l, cache: l,
failuresCount: prometheus.NewCounter( metrics: m,
prometheus.CounterOpts{
Name: "prometheus_sd_azure_failures_total",
Help: "Number of Azure service discovery refresh failures.",
}),
cacheHitCount: prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_sd_azure_cache_hit_total",
Help: "Number of cache hit during refresh.",
}),
} }
d.Discovery = refresh.NewDiscovery( d.Discovery = refresh.NewDiscovery(
refresh.Options{ refresh.Options{
Logger: logger, Logger: logger,
Mech: "azure", Mech: "azure",
Interval: time.Duration(cfg.RefreshInterval), Interval: time.Duration(cfg.RefreshInterval),
RefreshF: d.refresh, RefreshF: d.refresh,
Registry: reg, MetricsInstantiator: m.refreshMetrics,
Metrics: []prometheus.Collector{d.failuresCount, d.cacheHitCount},
}, },
) )
return d, nil return d, nil
} }
type client interface {
getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error)
getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error)
getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error)
getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error)
getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error)
}
// azureClient represents multiple Azure Resource Manager providers. // azureClient represents multiple Azure Resource Manager providers.
type azureClient struct { type azureClient struct {
nic *armnetwork.InterfacesClient nic *armnetwork.InterfacesClient
@ -222,14 +231,17 @@ type azureClient struct {
logger log.Logger logger log.Logger
} }
var _ client = &azureClient{}
// createAzureClient is a helper function for creating an Azure compute client to ARM. // createAzureClient is a helper function for creating an Azure compute client to ARM.
func createAzureClient(cfg SDConfig) (azureClient, error) { func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment) cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
var c azureClient var c azureClient
c.logger = logger
telemetry := policy.TelemetryOptions{ telemetry := policy.TelemetryOptions{
ApplicationID: userAgent, ApplicationID: userAgent,
@ -240,12 +252,12 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
Telemetry: telemetry, Telemetry: telemetry,
}) })
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd") client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
options := &arm.ClientOptions{ options := &arm.ClientOptions{
ClientOptions: policy.ClientOptions{ ClientOptions: policy.ClientOptions{
@ -257,25 +269,25 @@ func createAzureClient(cfg SDConfig) (azureClient, error) {
c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options) c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options) c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options) c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options) c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
if err != nil { if err != nil {
return azureClient{}, err return &azureClient{}, err
} }
return c, nil return &c, nil
} }
func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) { func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azcore.TokenCredential, error) {
@ -295,6 +307,16 @@ func newCredential(cfg SDConfig, policyClientOptions policy.ClientOptions) (azco
return nil, err return nil, err
} }
credential = azcore.TokenCredential(secretCredential) credential = azcore.TokenCredential(secretCredential)
case authMethodSDK:
options := &azidentity.DefaultAzureCredentialOptions{ClientOptions: policyClientOptions}
if len(cfg.TenantID) != 0 {
options.TenantID = cfg.TenantID
}
sdkCredential, err := azidentity.NewDefaultAzureCredential(options)
if err != nil {
return nil, err
}
credential = azcore.TokenCredential(sdkCredential)
} }
return credential, nil return credential, nil
} }
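A hedged usage note on the new SDK method, based on the azidentity documentation rather than this diff: azidentity.NewDefaultAzureCredential builds a credential chain that tries environment variables, workload identity, managed identity, and the Azure CLI in turn, so authentication_method: SDK lets Prometheus use whichever of those the host already has configured; tenant_id stays optional and is passed through when set.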
@ -331,16 +353,15 @@ func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, erro
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
defer level.Debug(d.logger).Log("msg", "Azure discovery completed") defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
client, err := createAzureClient(*d.cfg) client, err := createAzureClient(*d.cfg, d.logger)
if err != nil { if err != nil {
d.failuresCount.Inc() d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("could not create Azure client: %w", err) return nil, fmt.Errorf("could not create Azure client: %w", err)
} }
client.logger = d.logger
machines, err := client.getVMs(ctx, d.cfg.ResourceGroup) machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
if err != nil { if err != nil {
d.failuresCount.Inc() d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("could not get virtual machines: %w", err) return nil, fmt.Errorf("could not get virtual machines: %w", err)
} }
@ -349,14 +370,14 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
// Load the vms managed by scale sets. // Load the vms managed by scale sets.
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
if err != nil { if err != nil {
d.failuresCount.Inc() d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err) return nil, fmt.Errorf("could not get virtual machine scale sets: %w", err)
} }
for _, scaleSet := range scaleSets { for _, scaleSet := range scaleSets {
scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet) scaleSetVms, err := client.getScaleSetVMs(ctx, scaleSet)
if err != nil { if err != nil {
d.failuresCount.Inc() d.metrics.failuresCount.Inc()
return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err) return nil, fmt.Errorf("could not get virtual machine scale set vms: %w", err)
} }
machines = append(machines, scaleSetVms...) machines = append(machines, scaleSetVms...)
@@ -375,102 +396,8 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
     for _, vm := range machines {
         go func(vm virtualMachine) {
             defer wg.Done()
-            r, err := newAzureResourceFromID(vm.ID, d.logger)
-            if err != nil {
-                ch <- target{labelSet: nil, err: err}
-                return
-            }
-            labels := model.LabelSet{
-                azureLabelSubscriptionID:       model.LabelValue(d.cfg.SubscriptionID),
-                azureLabelTenantID:             model.LabelValue(d.cfg.TenantID),
-                azureLabelMachineID:            model.LabelValue(vm.ID),
-                azureLabelMachineName:          model.LabelValue(vm.Name),
-                azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
-                azureLabelMachineOSType:        model.LabelValue(vm.OsType),
-                azureLabelMachineLocation:      model.LabelValue(vm.Location),
-                azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
-                azureLabelMachineSize:          model.LabelValue(vm.Size),
-            }
-            if vm.ScaleSet != "" {
-                labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
-            }
-            for k, v := range vm.Tags {
-                name := strutil.SanitizeLabelName(k)
-                labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
-            }
-            // Get the IP address information via separate call to the network provider.
-            for _, nicID := range vm.NetworkInterfaces {
-                var networkInterface *armnetwork.Interface
-                if v, ok := d.getFromCache(nicID); ok {
-                    networkInterface = v
-                    d.cacheHitCount.Add(1)
-                } else {
-                    if vm.ScaleSet == "" {
-                        networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
-                        if err != nil {
-                            if errors.Is(err, errorNotFound) {
-                                level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
-                            } else {
-                                ch <- target{labelSet: nil, err: err}
-                            }
-                            // Get out of this routine because we cannot continue without a network interface.
-                            return
-                        }
-                        d.addToCache(nicID, networkInterface)
-                    } else {
-                        networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
-                        if err != nil {
-                            if errors.Is(err, errorNotFound) {
-                                level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
-                            } else {
-                                ch <- target{labelSet: nil, err: err}
-                            }
-                            // Get out of this routine because we cannot continue without a network interface.
-                            return
-                        }
-                        d.addToCache(nicID, networkInterface)
-                    }
-                }
-                if networkInterface.Properties == nil {
-                    continue
-                }
-                // Unfortunately Azure does not return information on whether a VM is deallocated.
-                // This information is available via another API call however the Go SDK does not
-                // yet support this. On deallocated machines, this value happens to be nil so it
-                // is a cheap and easy way to determine if a machine is allocated or not.
-                if networkInterface.Properties.Primary == nil {
-                    level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
-                    return
-                }
-                if *networkInterface.Properties.Primary {
-                    for _, ip := range networkInterface.Properties.IPConfigurations {
-                        // IPAddress is a field defined in PublicIPAddressPropertiesFormat,
-                        // therefore we need to validate that both are not nil.
-                        if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
-                            labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
-                        }
-                        if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
-                            labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
-                            address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, fmt.Sprintf("%d", d.port))
-                            labels[model.AddressLabel] = model.LabelValue(address)
-                            ch <- target{labelSet: labels, err: nil}
-                            return
-                        }
-                        // If we made it here, we don't have a private IP which should be impossible.
-                        // Return an empty target and error to ensure an all or nothing situation.
-                        err = fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
-                        ch <- target{labelSet: nil, err: err}
-                        return
-                    }
-                }
-            }
+            labelSet, err := d.vmToLabelSet(ctx, client, vm)
+            ch <- target{labelSet: labelSet, err: err}
         }(vm)
     }
@@ -480,7 +407,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
     var tg targetgroup.Group
     for tgt := range ch {
         if tgt.err != nil {
-            d.failuresCount.Inc()
+            d.metrics.failuresCount.Inc()
             return nil, fmt.Errorf("unable to complete Azure service discovery: %w", tgt.err)
         }
         if tgt.labelSet != nil {
@@ -491,6 +418,95 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
     return []*targetgroup.Group{&tg}, nil
 }
+
+func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
+    r, err := newAzureResourceFromID(vm.ID, d.logger)
+    if err != nil {
+        return nil, err
+    }
+
+    labels := model.LabelSet{
+        azureLabelSubscriptionID:       model.LabelValue(d.cfg.SubscriptionID),
+        azureLabelTenantID:             model.LabelValue(d.cfg.TenantID),
+        azureLabelMachineID:            model.LabelValue(vm.ID),
+        azureLabelMachineName:          model.LabelValue(vm.Name),
+        azureLabelMachineComputerName:  model.LabelValue(vm.ComputerName),
+        azureLabelMachineOSType:        model.LabelValue(vm.OsType),
+        azureLabelMachineLocation:      model.LabelValue(vm.Location),
+        azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroupName),
+        azureLabelMachineSize:          model.LabelValue(vm.Size),
+    }
+
+    if vm.ScaleSet != "" {
+        labels[azureLabelMachineScaleSet] = model.LabelValue(vm.ScaleSet)
+    }
+
+    for k, v := range vm.Tags {
+        name := strutil.SanitizeLabelName(k)
+        labels[azureLabelMachineTag+model.LabelName(name)] = model.LabelValue(*v)
+    }
+
+    // Get the IP address information via separate call to the network provider.
+    for _, nicID := range vm.NetworkInterfaces {
+        var networkInterface *armnetwork.Interface
+        if v, ok := d.getFromCache(nicID); ok {
+            networkInterface = v
+            d.metrics.cacheHitCount.Add(1)
+        } else {
+            if vm.ScaleSet == "" {
+                networkInterface, err = client.getVMNetworkInterfaceByID(ctx, nicID)
+            } else {
+                networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
+            }
+            if err != nil {
+                if errors.Is(err, errorNotFound) {
+                    level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
+                } else {
+                    return nil, err
+                }
+                // Get out of this routine because we cannot continue without a network interface.
+                return nil, nil
+            }
+
+            // Continue processing with the network interface
+            d.addToCache(nicID, networkInterface)
+        }
+
+        if networkInterface.Properties == nil {
+            continue
+        }
+
+        // Unfortunately Azure does not return information on whether a VM is deallocated.
+        // This information is available via another API call however the Go SDK does not
+        // yet support this. On deallocated machines, this value happens to be nil so it
+        // is a cheap and easy way to determine if a machine is allocated or not.
+        if networkInterface.Properties.Primary == nil {
+            level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
+            return nil, nil
+        }
+
+        if *networkInterface.Properties.Primary {
+            for _, ip := range networkInterface.Properties.IPConfigurations {
+                // IPAddress is a field defined in PublicIPAddressPropertiesFormat,
+                // therefore we need to validate that both are not nil.
+                if ip.Properties != nil && ip.Properties.PublicIPAddress != nil && ip.Properties.PublicIPAddress.Properties != nil && ip.Properties.PublicIPAddress.Properties.IPAddress != nil {
+                    labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.Properties.PublicIPAddress.Properties.IPAddress)
+                }
+                if ip.Properties != nil && ip.Properties.PrivateIPAddress != nil {
+                    labels[azureLabelMachinePrivateIP] = model.LabelValue(*ip.Properties.PrivateIPAddress)
+                    address := net.JoinHostPort(*ip.Properties.PrivateIPAddress, strconv.Itoa(d.port))
+                    labels[model.AddressLabel] = model.LabelValue(address)
+                    return labels, nil
+                }
+                // If we made it here, we don't have a private IP which should be impossible.
+                // Return an empty target and error to ensure an all or nothing situation.
+                return nil, fmt.Errorf("unable to find a private IP for VM %s", vm.Name)
+            }
+        }
+    }
+    // TODO: Should we say something at this point?
+    return nil, nil
+}
 func (client *azureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
     var vms []virtualMachine
     if len(resourceGroup) == 0 {
@@ -570,7 +586,7 @@ func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompu
 }

 func mapFromVM(vm armcompute.VirtualMachine) virtualMachine {
-    osType := string(*vm.Properties.StorageProfile.OSDisk.OSType)
+    var osType string
     tags := map[string]*string{}
     networkInterfaces := []string{}
     var computerName string
@@ -581,6 +597,12 @@ func mapFromVM(vm armcompute.VirtualMachine) virtualMachine {
     }

     if vm.Properties != nil {
+        if vm.Properties.StorageProfile != nil &&
+            vm.Properties.StorageProfile.OSDisk != nil &&
+            vm.Properties.StorageProfile.OSDisk.OSType != nil {
+            osType = string(*vm.Properties.StorageProfile.OSDisk.OSType)
+        }
+
         if vm.Properties.NetworkProfile != nil {
             for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
                 networkInterfaces = append(networkInterfaces, *vmNIC.ID)
@@ -609,7 +631,7 @@ func mapFromVM(vm armcompute.VirtualMachine) virtualMachine {
 }

 func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName string) virtualMachine {
-    osType := string(*vm.Properties.StorageProfile.OSDisk.OSType)
+    var osType string
     tags := map[string]*string{}
     networkInterfaces := []string{}
     var computerName string
@@ -620,6 +642,12 @@ func mapFromVMScaleSetVM(vm armcompute.VirtualMachineScaleSetVM, scaleSetName st
     }

     if vm.Properties != nil {
+        if vm.Properties.StorageProfile != nil &&
+            vm.Properties.StorageProfile.OSDisk != nil &&
+            vm.Properties.StorageProfile.OSDisk.OSType != nil {
+            osType = string(*vm.Properties.StorageProfile.OSDisk.OSType)
+        }
+
         if vm.Properties.NetworkProfile != nil {
             for _, vmNIC := range vm.Properties.NetworkProfile.NetworkInterfaces {
                 networkInterfaces = append(networkInterfaces, *vmNIC.ID)
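Because the per-VM labelling now lives in vmToLabelSet behind the small client interface, it can be exercised without touching the Azure API. A minimal sketch of that usage, mirroring the TestVMToLabelSet case added in the test file below; the nic and vm values are hypothetical stand-ins:

    // Sketch only: Discovery, virtualMachine, and mockAzureClient come from this commit;
    // nic (an armnetwork.Interface) and vm (a virtualMachine) are assumed inputs.
    cfg := DefaultSDConfig
    d := &Discovery{
        cfg:    &cfg,
        logger: log.NewNopLogger(),
        cache:  cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
    }
    // The stub client returns nic for every network-interface lookup.
    labelSet, err := d.vmToLabelSet(context.Background(), &mockAzureClient{networkInterface: &nic}, vm)
    // labelSet now carries the machine and address labels; err reports lookup or labelling failures.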


@@ -14,16 +14,24 @@
 package azure

 import (
+    "context"
+    "fmt"
     "testing"

     "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
-    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
+    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
+    cache "github.com/Code-Hex/go-generics-cache"
+    "github.com/Code-Hex/go-generics-cache/policy/lru"
+    "github.com/go-kit/log"
     "github.com/stretchr/testify/require"
     "go.uber.org/goleak"
 )

 func TestMain(m *testing.M) {
-    goleak.VerifyTestMain(m)
+    goleak.VerifyTestMain(m,
+        goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),
+    )
 }

 func TestMapFromVMWithEmptyTags(t *testing.T) {
@@ -79,6 +87,140 @@ func TestMapFromVMWithEmptyTags(t *testing.T) {
     require.Equal(t, expectedVM, actualVM)
 }

+func TestVMToLabelSet(t *testing.T) {
+    id := "/subscriptions/00000000-0000-0000-0000-000000000000/test"
+    name := "name"
+    size := "size"
+    vmSize := armcompute.VirtualMachineSizeTypes(size)
+    osType := armcompute.OperatingSystemTypesLinux
+    vmType := "type"
+    location := "westeurope"
+    computerName := "computer_name"
+    networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1"
+    ipAddress := "10.20.30.40"
+    primary := true
+    networkProfile := armcompute.NetworkProfile{
+        NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
+            {
+                ID:         &networkID,
+                Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
+            },
+        },
+    }
+    properties := &armcompute.VirtualMachineProperties{
+        OSProfile: &armcompute.OSProfile{
+            ComputerName: &computerName,
+        },
+        StorageProfile: &armcompute.StorageProfile{
+            OSDisk: &armcompute.OSDisk{
+                OSType: &osType,
+            },
+        },
+        NetworkProfile: &networkProfile,
+        HardwareProfile: &armcompute.HardwareProfile{
+            VMSize: &vmSize,
+        },
+    }
+
+    testVM := armcompute.VirtualMachine{
+        ID:         &id,
+        Name:       &name,
+        Type:       &vmType,
+        Location:   &location,
+        Tags:       nil,
+        Properties: properties,
+    }
+
+    expectedVM := virtualMachine{
+        ID:                id,
+        Name:              name,
+        ComputerName:      computerName,
+        Type:              vmType,
+        Location:          location,
+        OsType:            "Linux",
+        Tags:              map[string]*string{},
+        NetworkInterfaces: []string{networkID},
+        Size:              size,
+    }
+
+    actualVM := mapFromVM(testVM)
+    require.Equal(t, expectedVM, actualVM)
+
+    cfg := DefaultSDConfig
+    d := &Discovery{
+        cfg:    &cfg,
+        logger: log.NewNopLogger(),
+        cache:  cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
+    }
+    network := armnetwork.Interface{
+        Name: &networkID,
+        Properties: &armnetwork.InterfacePropertiesFormat{
+            Primary: &primary,
+            IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
+                {Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
+                    PrivateIPAddress: &ipAddress,
+                }},
+            },
+        },
+    }
+
+    client := &mockAzureClient{
+        networkInterface: &network,
+    }
+    labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
+    require.NoError(t, err)
+    require.Len(t, labelSet, 11)
+}
+
+func TestMapFromVMWithEmptyOSType(t *testing.T) {
+    id := "test"
+    name := "name"
+    size := "size"
+    vmSize := armcompute.VirtualMachineSizeTypes(size)
+    vmType := "type"
+    location := "westeurope"
+    computerName := "computer_name"
+    networkProfile := armcompute.NetworkProfile{
+        NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
+    }
+    properties := &armcompute.VirtualMachineProperties{
+        OSProfile: &armcompute.OSProfile{
+            ComputerName: &computerName,
+        },
+        StorageProfile: &armcompute.StorageProfile{
+            OSDisk: &armcompute.OSDisk{},
+        },
+        NetworkProfile: &networkProfile,
+        HardwareProfile: &armcompute.HardwareProfile{
+            VMSize: &vmSize,
+        },
+    }
+
+    testVM := armcompute.VirtualMachine{
+        ID:         &id,
+        Name:       &name,
+        Type:       &vmType,
+        Location:   &location,
+        Tags:       nil,
+        Properties: properties,
+    }
+
+    expectedVM := virtualMachine{
+        ID:                id,
+        Name:              name,
+        ComputerName:      computerName,
+        Type:              vmType,
+        Location:          location,
+        Tags:              map[string]*string{},
+        NetworkInterfaces: []string{},
+        Size:              size,
+    }
+
+    actualVM := mapFromVM(testVM)
+    require.Equal(t, expectedVM, actualVM)
+}
+
 func TestMapFromVMWithTags(t *testing.T) {
     id := "test"
     name := "name"
@@ -193,6 +335,58 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) {
     require.Equal(t, expectedVM, actualVM)
 }

+func TestMapFromVMScaleSetVMWithEmptyOSType(t *testing.T) {
+    id := "test"
+    name := "name"
+    size := "size"
+    vmSize := armcompute.VirtualMachineSizeTypes(size)
+    vmType := "type"
+    instanceID := "123"
+    location := "westeurope"
+    computerName := "computer_name"
+    networkProfile := armcompute.NetworkProfile{
+        NetworkInterfaces: []*armcompute.NetworkInterfaceReference{},
+    }
+    properties := &armcompute.VirtualMachineScaleSetVMProperties{
+        OSProfile: &armcompute.OSProfile{
+            ComputerName: &computerName,
+        },
+        StorageProfile: &armcompute.StorageProfile{},
+        NetworkProfile: &networkProfile,
+        HardwareProfile: &armcompute.HardwareProfile{
+            VMSize: &vmSize,
+        },
+    }
+
+    testVM := armcompute.VirtualMachineScaleSetVM{
+        ID:         &id,
+        Name:       &name,
+        Type:       &vmType,
+        InstanceID: &instanceID,
+        Location:   &location,
+        Tags:       nil,
+        Properties: properties,
+    }
+
+    scaleSet := "testSet"
+
+    expectedVM := virtualMachine{
+        ID:                id,
+        Name:              name,
+        ComputerName:      computerName,
+        Type:              vmType,
+        Location:          location,
+        Tags:              map[string]*string{},
+        NetworkInterfaces: []string{},
+        ScaleSet:          scaleSet,
+        InstanceID:        instanceID,
+        Size:              size,
+    }
+
+    actualVM := mapFromVMScaleSetVM(testVM, scaleSet)
+    require.Equal(t, expectedVM, actualVM)
+}
+
 func TestMapFromVMScaleSetVMWithTags(t *testing.T) {
     id := "test"
     name := "name"
@@ -280,3 +474,35 @@ func TestNewAzureResourceFromID(t *testing.T) {
         require.Equal(t, tc.expected.ResourceGroupName, actual.ResourceGroupName)
     }
 }
+
+type mockAzureClient struct {
+    networkInterface *armnetwork.Interface
+}
+
+var _ client = &mockAzureClient{}
+
+func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
+    return nil, nil
+}
+
+func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
+    return nil, nil
+}
+
+func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
+    return nil, nil
+}
+
+func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
+    if networkInterfaceID == "" {
+        return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty")
+    }
+    return m.networkInterface, nil
+}
+
+func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
+    if scaleSetName == "" {
+        return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty")
+    }
+    return m.networkInterface, nil
+}


@@ -0,0 +1,64 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package azure
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+
+    "github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*azureMetrics)(nil)
+
+type azureMetrics struct {
+    refreshMetrics discovery.RefreshMetricsInstantiator
+
+    failuresCount prometheus.Counter
+    cacheHitCount prometheus.Counter
+
+    metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+    m := &azureMetrics{
+        refreshMetrics: rmi,
+        failuresCount: prometheus.NewCounter(
+            prometheus.CounterOpts{
+                Name: "prometheus_sd_azure_failures_total",
+                Help: "Number of Azure service discovery refresh failures.",
+            }),
+        cacheHitCount: prometheus.NewCounter(
+            prometheus.CounterOpts{
+                Name: "prometheus_sd_azure_cache_hit_total",
+                Help: "Number of cache hit during refresh.",
+            }),
+    }
+
+    m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+        m.failuresCount,
+        m.cacheHitCount,
+    })
+
+    return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *azureMetrics) Register() error {
+    return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *azureMetrics) Unregister() {
+    m.metricRegisterer.UnregisterMetrics()
+}
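The Azure counters now live on azureMetrics rather than on Discovery itself, so the refresh path reaches them as d.metrics.failuresCount and d.metrics.cacheHitCount (see the azure.go hunks above), while registration becomes the caller's job. A minimal sketch of that lifecycle, following the same pattern the updated SD tests in this commit use; the surrounding code is assumed:

    // Sketch only: newDiscovererMetrics and discovery.NewRefreshMetrics are names from this commit.
    reg := prometheus.NewRegistry()
    refreshMetrics := discovery.NewRefreshMetrics(reg) // shared refresh-metrics instantiator, owned by the caller
    metrics := newDiscovererMetrics(reg, refreshMetrics)
    if err := metrics.Register(); err != nil { // registers failuresCount and cacheHitCount with reg
        // handle the registration error
    }
    defer metrics.Unregister()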


@@ -119,12 +119,17 @@ type SDConfig struct {
     HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+    return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "consul" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-    return NewDiscovery(c, opts.Logger, opts.Registerer)
+    return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -161,27 +166,28 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // Discovery retrieves target information from a Consul server
 // and updates them via watches.
 type Discovery struct {
     client           *consul.Client
     clientDatacenter string
     clientNamespace  string
     clientPartition  string
     tagSeparator     string
     watchedServices  []string // Set of services which will be discovered.
     watchedTags      []string // Tags used to filter instances of a service.
     watchedNodeMeta  map[string]string
     allowStale       bool
     refreshInterval  time.Duration
     finalizer        func()
     logger           log.Logger
-    rpcFailuresCount prometheus.Counter
-    rpcDuration      *prometheus.SummaryVec
-
-    servicesRPCDuration prometheus.Observer
-    serviceRPCDuration  prometheus.Observer
-
-    metricRegisterer discovery.MetricRegisterer
+    metrics          *consulMetrics
 }

 // NewDiscovery returns a new Discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+    m, ok := metrics.(*consulMetrics)
+    if !ok {
+        return nil, fmt.Errorf("invalid discovery metrics type")
+    }
+
     if logger == nil {
         logger = log.NewNopLogger()
     }
@@ -219,35 +225,9 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
         clientPartition:  conf.Partition,
         finalizer:        wrapper.CloseIdleConnections,
         logger:           logger,
-        rpcFailuresCount: prometheus.NewCounter(
-            prometheus.CounterOpts{
-                Namespace: namespace,
-                Name:      "sd_consul_rpc_failures_total",
-                Help:      "The number of Consul RPC call failures.",
-            }),
-        rpcDuration: prometheus.NewSummaryVec(
-            prometheus.SummaryOpts{
-                Namespace:  namespace,
-                Name:       "sd_consul_rpc_duration_seconds",
-                Help:       "The duration of a Consul RPC call in seconds.",
-                Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-            },
-            []string{"endpoint", "call"},
-        ),
+        metrics:          m,
     }

-    cd.metricRegisterer = discovery.NewMetricRegisterer(
-        reg,
-        []prometheus.Collector{
-            cd.rpcFailuresCount,
-            cd.rpcDuration,
-        },
-    )
-
-    // Initialize metric vectors.
-    cd.servicesRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "services")
-    cd.serviceRPCDuration = cd.rpcDuration.WithLabelValues("catalog", "service")
-
     return cd, nil
 }
@@ -303,7 +283,7 @@ func (d *Discovery) getDatacenter() error {
     info, err := d.client.Agent().Self()
     if err != nil {
         level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
-        d.rpcFailuresCount.Inc()
+        d.metrics.rpcFailuresCount.Inc()
         return err
     }
@@ -344,13 +324,6 @@ func (d *Discovery) initialize(ctx context.Context) {
 // Run implements the Discoverer interface.
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
-    err := d.metricRegisterer.RegisterMetrics()
-    if err != nil {
-        level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
-        return
-    }
-    defer d.metricRegisterer.UnregisterMetrics()
-
     if d.finalizer != nil {
         defer d.finalizer()
     }
@@ -399,7 +372,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
     t0 := time.Now()
     srvs, meta, err := catalog.Services(opts.WithContext(ctx))
     elapsed := time.Since(t0)
-    d.servicesRPCDuration.Observe(elapsed.Seconds())
+    d.metrics.servicesRPCDuration.Observe(elapsed.Seconds())

     // Check the context before in order to exit early.
     select {
@@ -410,7 +383,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
     if err != nil {
         level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
-        d.rpcFailuresCount.Inc()
+        d.metrics.rpcFailuresCount.Inc()
         time.Sleep(retryInterval)
         return
     }
@@ -490,8 +463,8 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
         },
         tagSeparator: d.tagSeparator,
         logger:       d.logger,
-        rpcFailuresCount:   d.rpcFailuresCount,
-        serviceRPCDuration: d.serviceRPCDuration,
+        rpcFailuresCount:   d.metrics.rpcFailuresCount,
+        serviceRPCDuration: d.metrics.serviceRPCDuration,
     }

     go func() {
@@ -566,9 +539,9 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
     // since the service may be registered remotely through a different node.
     var addr string
     if serviceNode.Service.Address != "" {
-        addr = net.JoinHostPort(serviceNode.Service.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+        addr = net.JoinHostPort(serviceNode.Service.Address, strconv.Itoa(serviceNode.Service.Port))
     } else {
-        addr = net.JoinHostPort(serviceNode.Node.Address, fmt.Sprintf("%d", serviceNode.Service.Port))
+        addr = net.JoinHostPort(serviceNode.Node.Address, strconv.Itoa(serviceNode.Service.Port))
     }

     labels := model.LabelSet{


@@ -29,6 +29,7 @@ import (
     "go.uber.org/goleak"
     "gopkg.in/yaml.v2"

+    "github.com/prometheus/prometheus/discovery"
     "github.com/prometheus/prometheus/discovery/targetgroup"
 )

@@ -36,20 +37,30 @@ func TestMain(m *testing.M) {
     goleak.VerifyTestMain(m)
 }

+// TODO: Add ability to unregister metrics?
+func NewTestMetrics(t *testing.T, conf discovery.Config, reg prometheus.Registerer) discovery.DiscovererMetrics {
+    refreshMetrics := discovery.NewRefreshMetrics(reg)
+    require.NoError(t, refreshMetrics.Register())
+
+    metrics := conf.NewDiscovererMetrics(prometheus.NewRegistry(), refreshMetrics)
+    require.NoError(t, metrics.Register())
+
+    return metrics
+}
+
 func TestConfiguredService(t *testing.T) {
     conf := &SDConfig{
         Services: []string{"configuredServiceName"},
     }
-    consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
-    if err != nil {
-        t.Errorf("Unexpected error when initializing discovery %v", err)
-    }
-    if !consulDiscovery.shouldWatch("configuredServiceName", []string{""}) {
-        t.Errorf("Expected service %s to be watched", "configuredServiceName")
-    }
-    if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) {
-        t.Errorf("Expected service %s to not be watched", "nonConfiguredServiceName")
-    }
+
+    metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
+
+    consulDiscovery, err := NewDiscovery(conf, nil, metrics)
+    require.NoError(t, err, "when initializing discovery")
+    require.True(t, consulDiscovery.shouldWatch("configuredServiceName", []string{""}),
+        "Expected service %s to be watched", "configuredServiceName")
+    require.False(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}),
+        "Expected service %s to not be watched", "nonConfiguredServiceName")
 }

 func TestConfiguredServiceWithTag(t *testing.T) {
@@ -57,22 +68,22 @@ func TestConfiguredServiceWithTag(t *testing.T) {
         Services:    []string{"configuredServiceName"},
         ServiceTags: []string{"http"},
     }
-    consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
-    if err != nil {
-        t.Errorf("Unexpected error when initializing discovery %v", err)
-    }
-    if consulDiscovery.shouldWatch("configuredServiceName", []string{""}) {
-        t.Errorf("Expected service %s to not be watched without tag", "configuredServiceName")
-    }
-    if !consulDiscovery.shouldWatch("configuredServiceName", []string{"http"}) {
-        t.Errorf("Expected service %s to be watched with tag %s", "configuredServiceName", "http")
-    }
-    if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) {
-        t.Errorf("Expected service %s to not be watched without tag", "nonConfiguredServiceName")
-    }
-    if consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{"http"}) {
-        t.Errorf("Expected service %s to not be watched with tag %s", "nonConfiguredServiceName", "http")
-    }
+
+    metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
+
+    consulDiscovery, err := NewDiscovery(conf, nil, metrics)
+    require.NoError(t, err, "when initializing discovery")
+    require.False(t, consulDiscovery.shouldWatch("configuredServiceName", []string{""}),
+        "Expected service %s to not be watched without tag", "configuredServiceName")
+
+    require.True(t, consulDiscovery.shouldWatch("configuredServiceName", []string{"http"}),
+        "Expected service %s to be watched with tag %s", "configuredServiceName", "http")
+
+    require.False(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}),
+        "Expected service %s to not be watched without tag", "nonConfiguredServiceName")
+
+    require.False(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{"http"}),
+        "Expected service %s to not be watched with tag %s", "nonConfiguredServiceName", "http")
 }

 func TestConfiguredServiceWithTags(t *testing.T) {
@@ -152,27 +163,24 @@ func TestConfiguredServiceWithTags(t *testing.T) {
     }

     for _, tc := range cases {
-        consulDiscovery, err := NewDiscovery(tc.conf, nil, prometheus.NewRegistry())
-        if err != nil {
-            t.Errorf("Unexpected error when initializing discovery %v", err)
-        }
-        ret := consulDiscovery.shouldWatch(tc.serviceName, tc.serviceTags)
-        if ret != tc.shouldWatch {
-            t.Errorf("Expected should watch? %t, got %t. Watched service and tags: %s %+v, input was %s %+v", tc.shouldWatch, ret, tc.conf.Services, tc.conf.ServiceTags, tc.serviceName, tc.serviceTags)
-        }
+        metrics := NewTestMetrics(t, tc.conf, prometheus.NewRegistry())
+
+        consulDiscovery, err := NewDiscovery(tc.conf, nil, metrics)
+        require.NoError(t, err, "when initializing discovery")
+        ret := consulDiscovery.shouldWatch(tc.serviceName, tc.serviceTags)
+        require.Equal(t, tc.shouldWatch, ret, "Watched service and tags: %s %+v, input was %s %+v",
+            tc.conf.Services, tc.conf.ServiceTags, tc.serviceName, tc.serviceTags)
     }
 }

 func TestNonConfiguredService(t *testing.T) {
     conf := &SDConfig{}
-    consulDiscovery, err := NewDiscovery(conf, nil, prometheus.NewRegistry())
-    if err != nil {
-        t.Errorf("Unexpected error when initializing discovery %v", err)
-    }
-    if !consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}) {
-        t.Errorf("Expected service %s to be watched", "nonConfiguredServiceName")
-    }
+
+    metrics := NewTestMetrics(t, conf, prometheus.NewRegistry())
+
+    consulDiscovery, err := NewDiscovery(conf, nil, metrics)
+    require.NoError(t, err, "when initializing discovery")
+    require.True(t, consulDiscovery.shouldWatch("nonConfiguredServiceName", []string{""}), "Expected service %s to be watched", "nonConfiguredServiceName")
 }

 const (
@@ -263,7 +271,10 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
 func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
     logger := log.NewNopLogger()
-    d, err := NewDiscovery(config, logger, prometheus.NewRegistry())
+
+    metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
+
+    d, err := NewDiscovery(config, logger, metrics)
     require.NoError(t, err)
     return d
 }
@@ -477,13 +488,10 @@ oauth2:
             var config SDConfig

             err := config.UnmarshalYAML(unmarshal([]byte(test.config)))
             if err != nil {
-                require.Equalf(t, err.Error(), test.errMessage, "Expected error '%s', got '%v'", test.errMessage, err)
-                return
-            }
-            if test.errMessage != "" {
-                t.Errorf("Expected error %s, got none", test.errMessage)
+                require.EqualError(t, err, test.errMessage)
                 return
             }
+            require.Empty(t, test.errMessage, "Expected error.")

             require.Equal(t, test.expected, config)
         })


@@ -0,0 +1,73 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package consul
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+
+    "github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*consulMetrics)(nil)
+
+type consulMetrics struct {
+    rpcFailuresCount prometheus.Counter
+    rpcDuration      *prometheus.SummaryVec
+
+    servicesRPCDuration prometheus.Observer
+    serviceRPCDuration  prometheus.Observer
+
+    metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+    m := &consulMetrics{
+        rpcFailuresCount: prometheus.NewCounter(
+            prometheus.CounterOpts{
+                Namespace: namespace,
+                Name:      "sd_consul_rpc_failures_total",
+                Help:      "The number of Consul RPC call failures.",
+            }),
+        rpcDuration: prometheus.NewSummaryVec(
+            prometheus.SummaryOpts{
+                Namespace:  namespace,
+                Name:       "sd_consul_rpc_duration_seconds",
+                Help:       "The duration of a Consul RPC call in seconds.",
+                Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+            },
+            []string{"endpoint", "call"},
+        ),
+    }
+
+    m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+        m.rpcFailuresCount,
+        m.rpcDuration,
+    })
+
+    // Initialize metric vectors.
+    m.servicesRPCDuration = m.rpcDuration.WithLabelValues("catalog", "services")
+    m.serviceRPCDuration = m.rpcDuration.WithLabelValues("catalog", "service")
+
+    return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *consulMetrics) Register() error {
+    return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *consulMetrics) Unregister() {
+    m.metricRegisterer.UnregisterMetrics()
+}


@@ -63,6 +63,13 @@ func init() {
     discovery.RegisterConfig(&SDConfig{})
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+    return &digitaloceanMetrics{
+        refreshMetrics: rmi,
+    }
+}
+
 // SDConfig is the configuration for DigitalOcean based service discovery.
 type SDConfig struct {
     HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
@@ -76,7 +83,7 @@ func (*SDConfig) Name() string { return "digitalocean" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-    return NewDiscovery(c, opts.Logger, opts.Registerer)
+    return NewDiscovery(c, opts.Logger, opts.Metrics)
 }

 // SetDirectory joins any relative file paths with dir.
@@ -104,7 +111,12 @@ type Discovery struct {
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+    m, ok := metrics.(*digitaloceanMetrics)
+    if !ok {
+        return nil, fmt.Errorf("invalid discovery metrics type")
+    }
+
     d := &Discovery{
         port: conf.Port,
     }
@@ -127,11 +139,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
     d.Discovery = refresh.NewDiscovery(
         refresh.Options{
             Logger:              logger,
             Mech:                "digitalocean",
             Interval:            time.Duration(conf.RefreshInterval),
             RefreshF:            d.refresh,
-            Registry:            reg,
+            MetricsInstantiator: m.refreshMetrics,
         },
     )
     return d, nil
@@ -165,7 +177,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
     }

     labels := model.LabelSet{
-        doLabelID:        model.LabelValue(fmt.Sprintf("%d", droplet.ID)),
+        doLabelID:        model.LabelValue(strconv.Itoa(droplet.ID)),
         doLabelName:      model.LabelValue(droplet.Name),
         doLabelImage:     model.LabelValue(droplet.Image.Slug),
         doLabelImageName: model.LabelValue(droplet.Image.Name),


@@ -23,6 +23,8 @@ import (
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/model"
     "github.com/stretchr/testify/require"
+
+    "github.com/prometheus/prometheus/discovery"
 )

 type DigitalOceanSDTestSuite struct {
@@ -47,7 +49,15 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
     cfg := DefaultSDConfig
     cfg.HTTPClientConfig.BearerToken = tokenID
-    d, err := NewDiscovery(&cfg, log.NewNopLogger(), prometheus.NewRegistry())
+
+    reg := prometheus.NewRegistry()
+    refreshMetrics := discovery.NewRefreshMetrics(reg)
+    metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
+    require.NoError(t, metrics.Register())
+    defer metrics.Unregister()
+    defer refreshMetrics.Unregister()
+
+    d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
     require.NoError(t, err)
     endpoint, err := url.Parse(sdmock.Mock.Endpoint())
     require.NoError(t, err)


@@ -0,0 +1,32 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digitalocean
+
+import (
+    "github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*digitaloceanMetrics)(nil)
+
+type digitaloceanMetrics struct {
+    refreshMetrics discovery.RefreshMetricsInstantiator
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *digitaloceanMetrics) Register() error {
+    return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *digitaloceanMetrics) Unregister() {}


@@ -0,0 +1,28 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+// Create a dummy metrics struct, because this SD doesn't have any metrics.
+type NoopDiscovererMetrics struct{}
+
+var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil)
+
+// Register implements discovery.DiscovererMetrics.
+func (*NoopDiscovererMetrics) Register() error {
+    return nil
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (*NoopDiscovererMetrics) Unregister() {
+}


@@ -39,24 +39,47 @@ type Discoverer interface {
     Run(ctx context.Context, up chan<- []*targetgroup.Group)
 }

+// Internal metrics of service discovery mechanisms.
+type DiscovererMetrics interface {
+    Register() error
+    Unregister()
+}
+
 // DiscovererOptions provides options for a Discoverer.
 type DiscovererOptions struct {
     Logger log.Logger

-    // A registerer for the Discoverer's metrics.
-    // Some Discoverers may ignore this registerer and use the global one instead.
-    // For now this will work, because the Prometheus `main` function uses the global registry.
-    // However, in the future the Prometheus `main` function will be updated to not use the global registry.
-    // Hence, if a discoverer wants its metrics to be visible via the Prometheus executable's
-    // `/metrics` endpoint, it should use this explicit registerer.
-    // TODO(ptodev): Update this comment once the Prometheus `main` function does not use the global registry.
-    Registerer prometheus.Registerer
+    Metrics DiscovererMetrics

     // Extra HTTP client options to expose to Discoverers. This field may be
     // ignored; Discoverer implementations must opt-in to reading it.
     HTTPClientOptions []config.HTTPClientOption
 }

+// Metrics used by the "refresh" package.
+// We define them here in the "discovery" package in order to avoid a cyclic dependency between
+// "discovery" and "refresh".
+type RefreshMetrics struct {
+    Failures prometheus.Counter
+    Duration prometheus.Observer
+}
+
+// Instantiate the metrics used by the "refresh" package.
+type RefreshMetricsInstantiator interface {
+    Instantiate(mech string) *RefreshMetrics
+}
+
+// An interface for registering, unregistering, and instantiating metrics for the "refresh" package.
+// Refresh metrics are registered and unregistered outside of the service discovery mechanism.
+// This is so that the same metrics can be reused across different service discovery mechanisms.
+// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are
+// specific to that SD. However, doing so would also expose too many unused metrics on
+// the Prometheus /metrics endpoint.
+type RefreshMetricsManager interface {
+    DiscovererMetrics
+    RefreshMetricsInstantiator
+}
+
 // A Config provides the configuration and constructor for a Discoverer.
 type Config interface {
     // Name returns the name of the discovery mechanism.
@@ -65,6 +88,9 @@ type Config interface {
     // NewDiscoverer returns a Discoverer for the Config
     // with the given DiscovererOptions.
     NewDiscoverer(DiscovererOptions) (Discoverer, error)
+
+    // NewDiscovererMetrics returns the metrics used by the service discovery.
+    NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics
 }

 // Configs is a slice of Config values that uses custom YAML marshaling and unmarshaling
@@ -119,6 +145,11 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) {
     return staticDiscoverer(c), nil
 }

+// No metrics are needed for this service discovery mechanism.
+func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics {
+    return &NoopDiscovererMetrics{}
+}
+
 type staticDiscoverer []*targetgroup.Group

 func (c staticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.Group) {
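Taken together, the new interfaces split metric ownership three ways: the Config builds its SD-specific metrics, the caller registers and unregisters them, and the Discoverer only consumes them through DiscovererOptions. A condensed, hypothetical sketch of that wiring; only the discovery-package names come from this commit:

    package main

    import (
        "context"

        "github.com/go-kit/log"
        "github.com/prometheus/client_golang/prometheus"

        "github.com/prometheus/prometheus/discovery"
        "github.com/prometheus/prometheus/discovery/targetgroup"
    )

    // runOneSD shows the intended call order; error handling is abbreviated on purpose.
    func runOneSD(ctx context.Context, cfg discovery.Config, reg prometheus.Registerer, logger log.Logger) error {
        rmi := discovery.NewRefreshMetrics(reg) // shared "refresh" metrics, owned by the caller
        if err := rmi.Register(); err != nil {
            return err
        }
        defer rmi.Unregister()

        metrics := cfg.NewDiscovererMetrics(reg, rmi) // SD-specific metrics built by the Config
        if err := metrics.Register(); err != nil {
            return err
        }
        defer metrics.Unregister()

        d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger, Metrics: metrics})
        if err != nil {
            return err
        }

        up := make(chan []*targetgroup.Group)
        go d.Run(ctx, up)
        for range up { // consume target-group updates until the Discoverer stops
        }
        return nil
    }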


@@ -18,6 +18,7 @@ import (
     "errors"
     "fmt"
     "net"
+    "strconv"
     "strings"
     "sync"
     "time"
@@ -67,12 +68,17 @@ type SDConfig struct {
     Port            int            `yaml:"port"` // Ignored for SRV records
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+    return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "dns" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-    return NewDiscovery(*c, opts.Logger, opts.Registerer)
+    return NewDiscovery(*c, opts.Logger, opts.Metrics)
 }

 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -102,18 +108,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // the Discoverer interface.
 type Discovery struct {
     *refresh.Discovery
     names   []string
     port    int
     qtype   uint16
     logger  log.Logger
-    dnsSDLookupsCount        prometheus.Counter
-    dnsSDLookupFailuresCount prometheus.Counter
+    metrics *dnsMetrics

     lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+    m, ok := metrics.(*dnsMetrics)
+    if !ok {
+        return nil, fmt.Errorf("invalid discovery metrics type")
+    }
+
     if logger == nil {
         logger = log.NewNopLogger()
     }
@@ -137,28 +147,16 @@ func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (
         port:     conf.Port,
         logger:   logger,
         lookupFn: lookupWithSearchPath,
-        dnsSDLookupsCount: prometheus.NewCounter(
-            prometheus.CounterOpts{
-                Namespace: namespace,
-                Name:      "sd_dns_lookups_total",
-                Help:      "The number of DNS-SD lookups.",
-            }),
-        dnsSDLookupFailuresCount: prometheus.NewCounter(
-            prometheus.CounterOpts{
-                Namespace: namespace,
-                Name:      "sd_dns_lookup_failures_total",
-                Help:      "The number of DNS-SD lookup failures.",
-            }),
+        metrics:  m,
     }

     d.Discovery = refresh.NewDiscovery(
         refresh.Options{
             Logger:              logger,
             Mech:                "dns",
             Interval:            time.Duration(conf.RefreshInterval),
             RefreshF:            d.refresh,
-            Registry:            prometheus.NewRegistry(),
-            Metrics:             []prometheus.Collector{d.dnsSDLookupsCount, d.dnsSDLookupFailuresCount},
+            MetricsInstantiator: m.refreshMetrics,
         },
     )
@@ -195,15 +193,15 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targetgroup.Group) error {
     response, err := d.lookupFn(name, d.qtype, d.logger)
-    d.dnsSDLookupsCount.Inc()
+    d.metrics.dnsSDLookupsCount.Inc()
     if err != nil {
-        d.dnsSDLookupFailuresCount.Inc()
+        d.metrics.dnsSDLookupFailuresCount.Inc()
         return err
     }

     tg := &targetgroup.Group{}
     hostPort := func(a string, p int) model.LabelValue {
-        return model.LabelValue(net.JoinHostPort(a, fmt.Sprintf("%d", p)))
+        return model.LabelValue(net.JoinHostPort(a, strconv.Itoa(p)))
     }

     for _, record := range response.Answer {
@@ -212,7 +210,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
         switch addr := record.(type) {
         case *dns.SRV:
             dnsSrvRecordTarget = model.LabelValue(addr.Target)
-            dnsSrvRecordPort = model.LabelValue(fmt.Sprintf("%d", addr.Port))
+            dnsSrvRecordPort = model.LabelValue(strconv.Itoa(int(addr.Port)))

             // Remove the final dot from rooted DNS names to make them look more usual.
             addr.Target = strings.TrimRight(addr.Target, ".")


@@ -28,6 +28,7 @@ import (
     "go.uber.org/goleak"
     "gopkg.in/yaml.v2"

+    "github.com/prometheus/prometheus/discovery"
     "github.com/prometheus/prometheus/discovery/targetgroup"
 )

@@ -253,13 +254,21 @@ func TestDNS(t *testing.T) {
         tc := tc
         t.Run(tc.name, func(t *testing.T) {
             t.Parallel()
-            sd, err := NewDiscovery(tc.config, nil, prometheus.NewRegistry())
+
+            reg := prometheus.NewRegistry()
+            refreshMetrics := discovery.NewRefreshMetrics(reg)
+            metrics := tc.config.NewDiscovererMetrics(reg, refreshMetrics)
+            require.NoError(t, metrics.Register())
+
+            sd, err := NewDiscovery(tc.config, nil, metrics)
             require.NoError(t, err)
             sd.lookupFn = tc.lookup

             tgs, err := sd.refresh(context.Background())
             require.NoError(t, err)
             require.Equal(t, tc.expected, tgs)
+
+            metrics.Unregister()
         })
     }
 }

discovery/dns/metrics.go (new file, 66 lines)

@@ -0,0 +1,66 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dns
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+
+    "github.com/prometheus/prometheus/discovery"
+)
+
+var _ discovery.DiscovererMetrics = (*dnsMetrics)(nil)
+
+type dnsMetrics struct {
+    refreshMetrics discovery.RefreshMetricsInstantiator
+
+    dnsSDLookupsCount        prometheus.Counter
+    dnsSDLookupFailuresCount prometheus.Counter
+
+    metricRegisterer discovery.MetricRegisterer
+}
+
+func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+    m := &dnsMetrics{
+        refreshMetrics: rmi,
+        dnsSDLookupsCount: prometheus.NewCounter(
+            prometheus.CounterOpts{
+                Namespace: namespace,
+                Name:      "sd_dns_lookups_total",
+                Help:      "The number of DNS-SD lookups.",
+            }),
+        dnsSDLookupFailuresCount: prometheus.NewCounter(
+            prometheus.CounterOpts{
+                Namespace: namespace,
+                Name:      "sd_dns_lookup_failures_total",
+                Help:      "The number of DNS-SD lookup failures.",
+            }),
+    }
+
+    m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
+        m.dnsSDLookupsCount,
+        m.dnsSDLookupFailuresCount,
+    })
+
+    return m
+}
+
+// Register implements discovery.DiscovererMetrics.
+func (m *dnsMetrics) Register() error {
+    return m.metricRegisterer.RegisterMetrics()
+}
+
+// Unregister implements discovery.DiscovererMetrics.
+func (m *dnsMetrics) Unregister() {
+    m.metricRegisterer.UnregisterMetrics()
+}


@@ -81,7 +81,7 @@ const appListPath string = "/apps"
 func fetchApps(ctx context.Context, server string, client *http.Client) (*Applications, error) {
     url := fmt.Sprintf("%s%s", server, appListPath)

-    request, err := http.NewRequest("GET", url, nil)
+    request, err := http.NewRequest(http.MethodGet, url, nil)
     if err != nil {
         return nil, err
     }


@ -16,6 +16,7 @@ package eureka
import (
	"context"
	"errors"
+	"fmt"
	"net"
	"net/http"
	"net/url"
@ -76,12 +77,19 @@ type SDConfig struct {
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
}

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &eurekaMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "eureka" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
}

// SetDirectory joins any relative file paths with dir.
@ -118,7 +126,12 @@ type Discovery struct {
}

// NewDiscovery creates a new Eureka discovery for the given role.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*eurekaMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
	if err != nil {
		return nil, err
@ -130,11 +143,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
	}
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
-			Logger:   logger,
-			Mech:     "eureka",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "eureka",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
		},
	)
	return d, nil
@ -215,7 +228,6 @@ func targetsForApp(app *Application) []model.LabelSet {
		}
		targets = append(targets, target)
	}
-
	return targets
}
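
A pattern worth noting, since it repeats in every SD package below: NewDiscovery now accepts the discovery.DiscovererMetrics interface and immediately type-asserts it back to the package's concrete metrics type, returning an "invalid discovery metrics type" error on mismatch. Judging from these diffs, the concrete type is always produced by the same package's NewDiscovererMetrics, so the assertion only fails if a caller wires a config to the wrong discoverer.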


@ -24,6 +24,7 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
@ -36,7 +37,17 @@ func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, err
Server: ts.URL, Server: ts.URL,
} }
md, err := NewDiscovery(&conf, nil, prometheus.NewRegistry()) reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
err := metrics.Register()
if err != nil {
return nil, err
}
defer metrics.Unregister()
defer refreshMetrics.Unregister()
md, err := NewDiscovery(&conf, nil, metrics)
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -0,0 +1,32 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eureka
import (
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*eurekaMetrics)(nil)
type eurekaMetrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
}
// Register implements discovery.DiscovererMetrics.
func (m *eurekaMetrics) Register() error {
return nil
}
// Unregister implements discovery.DiscovererMetrics.
func (m *eurekaMetrics) Unregister() {}
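
Note the split visible across this commit: SDs that are pure refresh.Discovery wrappers (eureka here, and gce, hetzner, and ionos below) get a metrics struct that merely carries the shared RefreshMetricsInstantiator, so their Register and Unregister are no-ops; SDs with counters of their own (dns above, file and http below) delegate both calls to a discovery.MetricRegisterer instead.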


@ -57,12 +57,17 @@ type SDConfig struct {
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
}

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "file" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
}

// SetDirectory joins any relative file paths with dir.
@ -94,6 +99,9 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
const fileSDFilepathLabel = model.MetaLabelPrefix + "filepath"

// TimestampCollector is a Custom Collector for Timestamps of the files.
+// TODO(ptodev): Now that each file SD has its own TimestampCollector
+// inside discovery/file/metrics.go, we can refactor this collector
+// (or get rid of it) as each TimestampCollector instance will only use one discoverer.
type TimestampCollector struct {
	Description *prometheus.Desc
	discoverers map[*Discovery]struct{}
@ -169,16 +177,16 @@ type Discovery struct {
	lastRefresh map[string]int
	logger      log.Logger

-	fileSDReadErrorsCount  prometheus.Counter
-	fileSDScanDuration     prometheus.Summary
-	fileWatcherErrorsCount prometheus.Counter
-	fileSDTimeStamp        *TimestampCollector
-	metricRegisterer       discovery.MetricRegisterer
+	metrics *fileMetrics
}

// NewDiscovery returns a new file discovery for the given paths.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	fm, ok := metrics.(*fileMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
	if logger == nil {
		logger = log.NewNopLogger()
	}
@ -188,33 +196,10 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
		interval:   time.Duration(conf.RefreshInterval),
		timestamps: make(map[string]float64),
		logger:     logger,
-		fileSDReadErrorsCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_file_read_errors_total",
-				Help: "The number of File-SD read errors.",
-			}),
-		fileSDScanDuration: prometheus.NewSummary(
-			prometheus.SummaryOpts{
-				Name:       "prometheus_sd_file_scan_duration_seconds",
-				Help:       "The duration of the File-SD scan in seconds.",
-				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-			}),
-		fileWatcherErrorsCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_file_watcher_errors_total",
-				Help: "The number of File-SD errors caused by filesystem watch failures.",
-			}),
-		fileSDTimeStamp: NewTimestampCollector(),
+		metrics:    fm,
	}

-	disc.fileSDTimeStamp.addDiscoverer(disc)
-	disc.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
-		disc.fileSDReadErrorsCount,
-		disc.fileSDScanDuration,
-		disc.fileWatcherErrorsCount,
-		disc.fileSDTimeStamp,
-	})
+	fm.init(disc)

	return disc, nil
}
@ -253,17 +238,10 @@ func (d *Discovery) watchFiles() {
// Run implements the Discoverer interface.
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
-	err := d.metricRegisterer.RegisterMetrics()
-	if err != nil {
-		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
-		return
-	}
-	defer d.metricRegisterer.UnregisterMetrics()
-
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
-		d.fileWatcherErrorsCount.Inc()
+		d.metrics.fileWatcherErrorsCount.Inc()
		return
	}
	d.watcher = watcher
@ -327,7 +305,7 @@ func (d *Discovery) stop() {
	done := make(chan struct{})
	defer close(done)

-	d.fileSDTimeStamp.removeDiscoverer(d)
+	d.metrics.fileSDTimeStamp.removeDiscoverer(d)

	// Closing the watcher will deadlock unless all events and errors are drained.
	go func() {
@ -353,13 +331,13 @@ func (d *Discovery) stop() {
func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group) {
	t0 := time.Now()
	defer func() {
-		d.fileSDScanDuration.Observe(time.Since(t0).Seconds())
+		d.metrics.fileSDScanDuration.Observe(time.Since(t0).Seconds())
	}()

	ref := map[string]int{}
	for _, p := range d.listFiles() {
		tgroups, err := d.readFile(p)
		if err != nil {
-			d.fileSDReadErrorsCount.Inc()
+			d.metrics.fileSDReadErrorsCount.Inc()
			level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
			// Prevent deletion down below.


@ -29,6 +29,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak" "go.uber.org/goleak"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
@ -144,19 +145,28 @@ func (t *testRunner) run(files ...string) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
t.cancelSD = cancel t.cancelSD = cancel
go func() { go func() {
conf := &SDConfig{
Files: files,
// Setting a high refresh interval to make sure that the tests only
// rely on file watches.
RefreshInterval: model.Duration(1 * time.Hour),
}
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
d, err := NewDiscovery( d, err := NewDiscovery(
&SDConfig{ conf,
Files: files,
// Setting a high refresh interval to make sure that the tests only
// rely on file watches.
RefreshInterval: model.Duration(1 * time.Hour),
},
nil, nil,
prometheus.NewRegistry(), metrics,
) )
require.NoError(t, err) require.NoError(t, err)
d.Run(ctx, t.ch) d.Run(ctx, t.ch)
metrics.Unregister()
}() }()
} }
@ -193,11 +203,11 @@ func (t *testRunner) targets() []*targetgroup.Group {
func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group) { func (t *testRunner) requireUpdate(ref time.Time, expected []*targetgroup.Group) {
t.Helper() t.Helper()
timeout := time.After(defaultWait)
for { for {
select { select {
case <-time.After(defaultWait): case <-timeout:
t.Fatalf("Expected update but got none") t.Fatalf("Expected update but got none")
return
case <-time.After(defaultWait / 10): case <-time.After(defaultWait / 10):
if ref.Equal(t.lastReceive()) { if ref.Equal(t.lastReceive()) {
// No update received. // No update received.
@ -347,9 +357,7 @@ func TestInvalidFile(t *testing.T) {
// Verify that we've received nothing. // Verify that we've received nothing.
time.Sleep(defaultWait) time.Sleep(defaultWait)
if runner.lastReceive().After(now) { require.False(t, runner.lastReceive().After(now), "unexpected targets received: %v", runner.targets())
t.Fatalf("unexpected targets received: %v", runner.targets())
}
}) })
} }
} }
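
The requireUpdate change above fixes a subtle Go timer bug rather than just reshuffling code: when time.After(defaultWait) is evaluated inside the select on every loop iteration, a fresh timer is armed each pass, and since the defaultWait/10 polling case always fires first, the overall timeout can never trigger. Hoisting the channel out of the loop arms it exactly once. A standalone illustration of the pattern (durations are arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	deadline := time.After(100 * time.Millisecond) // armed once, before the loop
	for {
		select {
		case <-deadline:
			fmt.Println("overall deadline fired")
			return
		case <-time.After(10 * time.Millisecond): // recreating the short poll timer is fine
			fmt.Println("poll tick")
		}
	}
}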

discovery/file/metrics.go (new file)

@ -0,0 +1,76 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package file
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*fileMetrics)(nil)
type fileMetrics struct {
fileSDReadErrorsCount prometheus.Counter
fileSDScanDuration prometheus.Summary
fileWatcherErrorsCount prometheus.Counter
fileSDTimeStamp *TimestampCollector
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
fm := &fileMetrics{
fileSDReadErrorsCount: prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_sd_file_read_errors_total",
Help: "The number of File-SD read errors.",
}),
fileSDScanDuration: prometheus.NewSummary(
prometheus.SummaryOpts{
Name: "prometheus_sd_file_scan_duration_seconds",
Help: "The duration of the File-SD scan in seconds.",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}),
fileWatcherErrorsCount: prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_sd_file_watcher_errors_total",
Help: "The number of File-SD errors caused by filesystem watch failures.",
}),
fileSDTimeStamp: NewTimestampCollector(),
}
fm.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
fm.fileSDReadErrorsCount,
fm.fileSDScanDuration,
fm.fileWatcherErrorsCount,
fm.fileSDTimeStamp,
})
return fm
}
// Register implements discovery.DiscovererMetrics.
func (fm *fileMetrics) Register() error {
return fm.metricRegisterer.RegisterMetrics()
}
// Unregister implements discovery.DiscovererMetrics.
func (fm *fileMetrics) Unregister() {
fm.metricRegisterer.UnregisterMetrics()
}
func (fm *fileMetrics) init(disc *Discovery) {
fm.fileSDTimeStamp.addDiscoverer(disc)
}


@ -82,12 +82,19 @@ type SDConfig struct {
	TagSeparator string `yaml:"tag_separator,omitempty"`
}

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &gceMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "gce" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(*c, opts.Logger, opts.Registerer)
+	return NewDiscovery(*c, opts.Logger, opts.Metrics)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
@ -122,7 +129,12 @@ type Discovery struct {
}

// NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*gceMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
	d := &Discovery{
		project: conf.Project,
		zone:    conf.Zone,
@ -143,11 +155,11 @@ func NewDiscovery(conf SDConfig, logger log.Logger, reg prometheus.Registerer) (
	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
-			Logger:   logger,
-			Mech:     "gce",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "gce",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
		},
	)
	return d, nil

discovery/gce/metrics.go (new file)

@ -0,0 +1,32 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gce
import (
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*gceMetrics)(nil)
type gceMetrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
}
// Register implements discovery.DiscovererMetrics.
func (m *gceMetrics) Register() error {
return nil
}
// Unregister implements discovery.DiscovererMetrics.
func (m *gceMetrics) Unregister() {}


@ -15,7 +15,6 @@ package hetzner
import (
	"context"
-	"fmt"
	"net"
	"net/http"
	"strconv"
@ -92,7 +91,7 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
	for i, server := range servers {
		labels := model.LabelSet{
			hetznerLabelRole:       model.LabelValue(HetznerRoleHcloud),
-			hetznerLabelServerID:   model.LabelValue(fmt.Sprintf("%d", server.ID)),
+			hetznerLabelServerID:   model.LabelValue(strconv.FormatInt(server.ID, 10)),
			hetznerLabelServerName: model.LabelValue(server.Name),
			hetznerLabelDatacenter: model.LabelValue(server.Datacenter.Name),
			hetznerLabelPublicIPv4: model.LabelValue(server.PublicNet.IPv4.IP.String()),
@ -102,10 +101,10 @@ func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
			hetznerLabelHcloudDatacenterLocation:            model.LabelValue(server.Datacenter.Location.Name),
			hetznerLabelHcloudDatacenterLocationNetworkZone: model.LabelValue(server.Datacenter.Location.NetworkZone),
			hetznerLabelHcloudType:                          model.LabelValue(server.ServerType.Name),
-			hetznerLabelHcloudCPUCores:                      model.LabelValue(fmt.Sprintf("%d", server.ServerType.Cores)),
+			hetznerLabelHcloudCPUCores:                      model.LabelValue(strconv.Itoa(server.ServerType.Cores)),
			hetznerLabelHcloudCPUType:                       model.LabelValue(server.ServerType.CPUType),
-			hetznerLabelHcloudMemoryGB:                      model.LabelValue(fmt.Sprintf("%d", int(server.ServerType.Memory))),
-			hetznerLabelHcloudDiskGB:                        model.LabelValue(fmt.Sprintf("%d", server.ServerType.Disk)),
+			hetznerLabelHcloudMemoryGB:                      model.LabelValue(strconv.Itoa(int(server.ServerType.Memory))),
+			hetznerLabelHcloudDiskGB:                        model.LabelValue(strconv.Itoa(server.ServerType.Disk)),
			model.AddressLabel: model.LabelValue(net.JoinHostPort(server.PublicNet.IPv4.IP.String(), strconv.FormatUint(uint64(d.port), 10))),
		}
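
The fmt.Sprintf-to-strconv swaps in this hunk (and in robot.go below) are a standard Go lint cleanup: for a single integer or bool, strconv is the idiomatic choice and avoids the interface boxing and reflection-driven formatting that fmt performs. The pairs are output-equivalent:

package example

import (
	"fmt"
	"strconv"
)

func equivalents() []string {
	return []string{
		fmt.Sprintf("%d", 42),            // reflection-based formatting
		strconv.Itoa(42),                 // same output, direct conversion
		strconv.FormatInt(int64(42), 10), // explicit base 10, for int64 values
		strconv.FormatBool(true),         // "true"/"false", replacing %t
	}
}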


@ -63,12 +63,19 @@ type SDConfig struct {
	robotEndpoint string // For tests only.
}

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &hetznerMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "hetzner" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.Metrics)
}

type refresher interface {
@ -128,7 +135,12 @@ type Discovery struct {
}

// NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+	m, ok := metrics.(*hetznerMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
	r, err := newRefresher(conf, logger)
	if err != nil {
		return nil, err
@ -136,11 +148,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
	return refresh.NewDiscovery(
		refresh.Options{
-			Logger:   logger,
-			Mech:     "hetzner",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: r.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "hetzner",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            r.refresh,
+			MetricsInstantiator: m.refreshMetrics,
		},
	), nil
}


@ -0,0 +1,32 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hetzner
import (
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*hetznerMetrics)(nil)
type hetznerMetrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
}
// Register implements discovery.DiscovererMetrics.
func (m *hetznerMetrics) Register() error {
return nil
}
// Unregister implements discovery.DiscovererMetrics.
func (m *hetznerMetrics) Unregister() {}


@ -70,7 +70,7 @@ func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
}

func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
-	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
+	req, err := http.NewRequest(http.MethodGet, d.endpoint+"/server", nil)
	if err != nil {
		return nil, err
	}
@ -112,7 +112,7 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
			hetznerLabelPublicIPv4:     model.LabelValue(server.Server.ServerIP),
			hetznerLabelServerStatus:   model.LabelValue(server.Server.Status),
			hetznerLabelRobotProduct:   model.LabelValue(server.Server.Product),
-			hetznerLabelRobotCancelled: model.LabelValue(fmt.Sprintf("%t", server.Server.Canceled)),
+			hetznerLabelRobotCancelled: model.LabelValue(strconv.FormatBool(server.Server.Canceled)),
			model.AddressLabel: model.LabelValue(net.JoinHostPort(server.Server.ServerIP, strconv.FormatUint(uint64(d.port), 10))),
		}
@ -122,7 +122,6 @@ func (d *robotDiscovery) refresh(context.Context) ([]*targetgroup.Group, error)
				labels[hetznerLabelPublicIPv6Network] = model.LabelValue(fmt.Sprintf("%s/%s", subnet.IP, subnet.Mask))
				break
			}
-
		}
		targets[i] = labels
	}


@ -58,12 +58,17 @@ type SDConfig struct {
	URL string `yaml:"url"`
}

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
// Name returns the name of the Config.
func (*SDConfig) Name() string { return "http" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Registerer)
+	return NewDiscovery(c, opts.Logger, opts.HTTPClientOptions, opts.Metrics)
}

// SetDirectory joins any relative file paths with dir.
@ -105,11 +110,16 @@ type Discovery struct {
	client          *http.Client
	refreshInterval time.Duration
	tgLastLength    int
-	failuresCount prometheus.Counter
+	metrics *httpMetrics
}

// NewDiscovery returns a new HTTP discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, reg prometheus.Registerer) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+	m, ok := metrics.(*httpMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
	if logger == nil {
		logger = log.NewNopLogger()
	}
@ -124,28 +134,23 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPCli
		url:             conf.URL,
		client:          client,
		refreshInterval: time.Duration(conf.RefreshInterval), // Stored to be sent as headers.
-		failuresCount: prometheus.NewCounter(
-			prometheus.CounterOpts{
-				Name: "prometheus_sd_http_failures_total",
-				Help: "Number of HTTP service discovery refresh failures.",
-			}),
+		metrics:         m,
	}

	d.Discovery = refresh.NewDiscovery(
		refresh.Options{
-			Logger:   logger,
-			Mech:     "http",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.Refresh,
-			Registry: reg,
-			Metrics:  []prometheus.Collector{d.failuresCount},
+			Logger:              logger,
+			Mech:                "http",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.Refresh,
+			MetricsInstantiator: m.refreshMetrics,
		},
	)
	return d, nil
}

func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
-	req, err := http.NewRequest("GET", d.url, nil)
+	req, err := http.NewRequest(http.MethodGet, d.url, nil)
	if err != nil {
		return nil, err
	}
@ -155,7 +160,7 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {

	resp, err := d.client.Do(req.WithContext(ctx))
	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
		return nil, err
	}
	defer func() {
@ -164,31 +169,31 @@ func (d *Discovery) Refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	}()

	if resp.StatusCode != http.StatusOK {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
		return nil, fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	if !matchContentType.MatchString(strings.TrimSpace(resp.Header.Get("Content-Type"))) {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
		return nil, fmt.Errorf("unsupported content type %q", resp.Header.Get("Content-Type"))
	}

	b, err := io.ReadAll(resp.Body)
	if err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
		return nil, err
	}

	var targetGroups []*targetgroup.Group

	if err := json.Unmarshal(b, &targetGroups); err != nil {
-		d.failuresCount.Inc()
+		d.metrics.failuresCount.Inc()
		return nil, err
	}

	for i, tg := range targetGroups {
		if tg == nil {
-			d.failuresCount.Inc()
+			d.metrics.failuresCount.Inc()
			err = errors.New("nil target group item found")
			return nil, err
		}


@ -28,6 +28,7 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
@ -41,7 +42,14 @@ func TestHTTPValidRefresh(t *testing.T) {
RefreshInterval: model.Duration(30 * time.Second), RefreshInterval: model.Duration(30 * time.Second),
} }
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
defer refreshMetrics.Unregister()
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -63,7 +71,7 @@ func TestHTTPValidRefresh(t *testing.T) {
}, },
} }
require.Equal(t, expectedTargets, tgs) require.Equal(t, expectedTargets, tgs)
require.Equal(t, 0.0, getFailureCount(d.failuresCount)) require.Equal(t, 0.0, getFailureCount(d.metrics.failuresCount))
} }
func TestHTTPInvalidCode(t *testing.T) { func TestHTTPInvalidCode(t *testing.T) {
@ -79,13 +87,20 @@ func TestHTTPInvalidCode(t *testing.T) {
RefreshInterval: model.Duration(30 * time.Second), RefreshInterval: model.Duration(30 * time.Second),
} }
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
defer refreshMetrics.Unregister()
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
_, err = d.Refresh(ctx) _, err = d.Refresh(ctx)
require.EqualError(t, err, "server returned HTTP status 400 Bad Request") require.EqualError(t, err, "server returned HTTP status 400 Bad Request")
require.Equal(t, 1.0, getFailureCount(d.failuresCount)) require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount))
} }
func TestHTTPInvalidFormat(t *testing.T) { func TestHTTPInvalidFormat(t *testing.T) {
@ -101,13 +116,20 @@ func TestHTTPInvalidFormat(t *testing.T) {
RefreshInterval: model.Duration(30 * time.Second), RefreshInterval: model.Duration(30 * time.Second),
} }
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry()) reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
defer refreshMetrics.Unregister()
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
_, err = d.Refresh(ctx) _, err = d.Refresh(ctx)
require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`) require.EqualError(t, err, `unsupported content type "text/plain; charset=utf-8"`)
require.Equal(t, 1.0, getFailureCount(d.failuresCount)) require.Equal(t, 1.0, getFailureCount(d.metrics.failuresCount))
} }
func getFailureCount(failuresCount prometheus.Counter) float64 { func getFailureCount(failuresCount prometheus.Counter) float64 {
@ -412,7 +434,15 @@ func TestSourceDisappeared(t *testing.T) {
URL: ts.URL, URL: ts.URL,
RefreshInterval: model.Duration(1 * time.Second), RefreshInterval: model.Duration(1 * time.Second),
} }
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, prometheus.NewRegistry())
reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg)
defer refreshMetrics.Unregister()
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register())
defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
for _, test := range cases { for _, test := range cases {
ctx := context.Background() ctx := context.Background()

discovery/http/metrics.go (new file)

@ -0,0 +1,57 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*httpMetrics)(nil)
type httpMetrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
failuresCount prometheus.Counter
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
m := &httpMetrics{
refreshMetrics: rmi,
failuresCount: prometheus.NewCounter(
prometheus.CounterOpts{
Name: "prometheus_sd_http_failures_total",
Help: "Number of HTTP service discovery refresh failures.",
}),
}
m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
m.failuresCount,
})
return m
}
// Register implements discovery.DiscovererMetrics.
func (m *httpMetrics) Register() error {
return m.metricRegisterer.RegisterMetrics()
}
// Unregister implements discovery.DiscovererMetrics.
func (m *httpMetrics) Unregister() {
m.metricRegisterer.UnregisterMetrics()
}


@ -15,16 +15,16 @@ package ionos
import (
	"errors"
+	"fmt"
	"time"

	"github.com/go-kit/log"
+	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
-
-	"github.com/prometheus/client_golang/prometheus"
)

const (
@ -43,7 +43,12 @@ func init() {
type Discovery struct{}

// NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
-func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+	m, ok := metrics.(*ionosMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
	if conf.ionosEndpoint == "" {
		conf.ionosEndpoint = "https://api.ionos.com"
	}
@ -55,11 +60,11 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, reg prometheus.Registerer)
	return refresh.NewDiscovery(
		refresh.Options{
-			Logger:   logger,
-			Mech:     "ionos",
-			Interval: time.Duration(conf.RefreshInterval),
-			RefreshF: d.refresh,
-			Registry: reg,
+			Logger:              logger,
+			Mech:                "ionos",
+			Interval:            time.Duration(conf.RefreshInterval),
+			RefreshF:            d.refresh,
+			MetricsInstantiator: m.refreshMetrics,
		},
	), nil
}
@ -84,6 +89,13 @@ type SDConfig struct {
	ionosEndpoint string // For tests only.
}

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return &ionosMetrics{
+		refreshMetrics: rmi,
+	}
+}
+
// Name returns the name of the IONOS Cloud service discovery.
func (c SDConfig) Name() string {
	return "ionos"
@ -91,7 +103,7 @@ func (c SDConfig) Name() string {

// NewDiscoverer returns a new discovery.Discoverer for IONOS Cloud.
func (c SDConfig) NewDiscoverer(options discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return NewDiscovery(&c, options.Logger, options.Registerer)
+	return NewDiscovery(&c, options.Logger, options.Metrics)
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.


@ -0,0 +1,32 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ionos
import (
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*ionosMetrics)(nil)
type ionosMetrics struct {
refreshMetrics discovery.RefreshMetricsInstantiator
}
// Register implements discovery.DiscovererMetrics.
func (m *ionosMetrics) Register() error {
return nil
}
// Unregister implements discovery.DiscovererMetrics.
func (m *ionosMetrics) Unregister() {}


@ -62,6 +62,8 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
	svcUpdateCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleUpdate)
	svcDeleteCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleDelete)

+	podUpdateCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleUpdate)
+
	e := &Endpoints{
		logger:       l,
		endpointsInf: eps,
@ -131,6 +133,29 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
	if err != nil {
		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
	}
+	_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+		UpdateFunc: func(old, cur interface{}) {
+			podUpdateCount.Inc()
+
+			oldPod, ok := old.(*apiv1.Pod)
+			if !ok {
+				return
+			}
+
+			curPod, ok := cur.(*apiv1.Pod)
+			if !ok {
+				return
+			}
+
+			// the Pod's phase may change without triggering an update on the Endpoints/Service.
+			// https://github.com/prometheus/prometheus/issues/11305.
+			if curPod.Status.Phase != oldPod.Status.Phase {
+				e.enqueuePod(namespacedName(curPod.Namespace, curPod.Name))
+			}
+		},
+	})
+	if err != nil {
+		level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+	}
	if e.withNodeMetadata {
		_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
			AddFunc: func(o interface{}) {
@ -166,6 +191,18 @@ func (e *Endpoints) enqueueNode(nodeName string) {
	}
}

+func (e *Endpoints) enqueuePod(podNamespacedName string) {
+	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName)
+	if err != nil {
+		level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
+		return
+	}
+
+	for _, endpoint := range endpoints {
+		e.enqueue(endpoint)
+	}
+}
+
func (e *Endpoints) enqueue(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
@ -312,7 +349,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
			tg.Targets = append(tg.Targets, target)
			return
		}
-		s := pod.Namespace + "/" + pod.Name
+		s := namespacedName(pod.Namespace, pod.Name)

		sp, ok := seenPods[s]
		if !ok {
@ -369,6 +406,11 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
	// For all seen pods, check all container ports. If they were not covered
	// by one of the service endpoints, generate targets for them.
	for _, pe := range seenPods {
+		// PodIP can be empty when a pod is starting or has been evicted.
+		if len(pe.pod.Status.PodIP) == 0 {
+			continue
+		}
+
		for _, c := range pe.pod.Spec.Containers {
			for _, cport := range c.Ports {
				hasSeenPort := func() bool {
@ -383,21 +425,18 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
					continue
				}

-				// PodIP can be empty when a pod is starting or has been evicted.
-				if len(pe.pod.Status.PodIP) != 0 {
-					a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-					ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

-					target := model.LabelSet{
-						model.AddressLabel:            lv(a),
-						podContainerNameLabel:         lv(c.Name),
-						podContainerImageLabel:        lv(c.Image),
-						podContainerPortNameLabel:     lv(cport.Name),
-						podContainerPortNumberLabel:   lv(ports),
-						podContainerPortProtocolLabel: lv(string(cport.Protocol)),
-					}
-					tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
-				}
+				target := model.LabelSet{
+					model.AddressLabel:            lv(a),
+					podContainerNameLabel:         lv(c.Name),
+					podContainerImageLabel:        lv(c.Image),
+					podContainerPortNameLabel:     lv(cport.Name),
+					podContainerPortNumberLabel:   lv(ports),
+					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+				}
+				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
			}
		}
	}
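
The new enqueuePod relies on a podIndex maintained on the endpoints informer: it maps a pod's namespaced name to the Endpoints objects whose addresses target that pod, so a phase change on the pod re-enqueues exactly those Endpoints. The index itself is defined elsewhere in this package; the sketch below is an illustrative reconstruction of its shape, not the committed code (the index name, the TargetRef walk, and the local namespacedName helper are assumptions):

package example

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

const podIndex = "pod" // hypothetical index name

func namespacedName(namespace, name string) string { return namespace + "/" + name }

// endpointsPodIndexer returns, for one Endpoints object, the namespaced names
// of all pods its addresses point at, letting ByIndex(podIndex, pod) find the
// Endpoints to re-enqueue when that pod changes.
func endpointsPodIndexer(obj interface{}) ([]string, error) {
	e, ok := obj.(*apiv1.Endpoints)
	if !ok {
		return nil, fmt.Errorf("object is not endpoints")
	}
	var pods []string
	for _, subset := range e.Subsets {
		for _, addr := range subset.Addresses {
			if ref := addr.TargetRef; ref != nil && ref.Kind == "Pod" {
				pods = append(pods, namespacedName(ref.Namespace, ref.Name))
			}
		}
	}
	return pods, nil
}

var _ = cache.Indexers{podIndex: endpointsPodIndexer}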


@ -969,3 +969,123 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
		expectedRes: map[string]*targetgroup.Group{},
	}.Run(t)
}
// TestEndpointsUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// See https://github.com/prometheus/prometheus/issues/11305 for more details.
func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "default",
UID: types.UID("deadbeef"),
},
Spec: v1.PodSpec{
NodeName: "testnode",
Containers: []v1.Container{
{
Name: "c1",
Image: "c1:latest",
Ports: []v1.ContainerPort{
{
Name: "mainport",
ContainerPort: 9000,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
Status: v1.PodStatus{
// Pod is in Pending phase when discovered for first time.
Phase: "Pending",
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionFalse,
},
},
HostIP: "2.3.4.5",
PodIP: "4.3.2.1",
},
}
objs := []runtime.Object{
&v1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "testendpoints",
Namespace: "default",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: "4.3.2.1",
// The Pending Pod may be included because the Endpoints was created manually.
// Or because the corresponding service has ".spec.publishNotReadyAddresses: true".
TargetRef: &v1.ObjectReference{
Kind: "Pod",
Name: "testpod",
Namespace: "default",
},
},
},
Ports: []v1.EndpointPort{
{
Name: "mainport",
Port: 9000,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
pod,
}
n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...)
k8sDiscoveryTest{
discovery: n,
afterStart: func() {
// the Pod becomes Ready.
pod.Status.Phase = "Running"
pod.Status.Conditions = []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
}
c.CoreV1().Pods(pod.Namespace).Update(context.Background(), pod, metav1.UpdateOptions{})
},
expectedMaxItems: 2,
expectedRes: map[string]*targetgroup.Group{
"endpoints/default/testendpoints": {
Targets: []model.LabelSet{
{
"__address__": "4.3.2.1:9000",
"__meta_kubernetes_endpoint_port_name": "mainport",
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "true",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "testpod",
"__meta_kubernetes_pod_name": "testpod",
"__meta_kubernetes_pod_ip": "4.3.2.1",
"__meta_kubernetes_pod_ready": "true",
"__meta_kubernetes_pod_phase": "Running",
"__meta_kubernetes_pod_node_name": "testnode",
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
"__meta_kubernetes_pod_container_name": "c1",
"__meta_kubernetes_pod_container_image": "c1:latest",
"__meta_kubernetes_pod_container_port_name": "mainport",
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_uid": "deadbeef",
},
},
Labels: model.LabelSet{
"__meta_kubernetes_namespace": "default",
"__meta_kubernetes_endpoints_name": "testendpoints",
},
Source: "endpoints/default/testendpoints",
},
},
}.Run(t)
}


@ -265,7 +265,9 @@ const (
	endpointSliceEndpointConditionsReadyLabel       = metaLabelPrefix + "endpointslice_endpoint_conditions_ready"
	endpointSliceEndpointConditionsServingLabel     = metaLabelPrefix + "endpointslice_endpoint_conditions_serving"
	endpointSliceEndpointConditionsTerminatingLabel = metaLabelPrefix + "endpointslice_endpoint_conditions_terminating"
+	endpointSliceEndpointZoneLabel                  = metaLabelPrefix + "endpointslice_endpoint_zone"
	endpointSliceEndpointHostnameLabel              = metaLabelPrefix + "endpointslice_endpoint_hostname"
+	endpointSliceEndpointNodenameLabel              = metaLabelPrefix + "endpointslice_endpoint_node_name"
	endpointSliceAddressTargetKindLabel             = metaLabelPrefix + "endpointslice_address_target_kind"
	endpointSliceAddressTargetNameLabel             = metaLabelPrefix + "endpointslice_address_target_name"
	endpointSliceEndpointTopologyLabelPrefix        = metaLabelPrefix + "endpointslice_endpoint_topology_"
@ -338,6 +340,14 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
			target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.targetRef().Name)
		}

+		if ep.nodename() != nil {
+			target[endpointSliceEndpointNodenameLabel] = lv(*ep.nodename())
+		}
+
+		if ep.zone() != nil {
+			target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.zone())
+		}
+
		for k, v := range ep.topology() {
			ln := strutil.SanitizeLabelName(k)
			target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v)
@ -358,7 +368,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
			tg.Targets = append(tg.Targets, target)
			return
		}
-		s := pod.Namespace + "/" + pod.Name
+		s := namespacedName(pod.Namespace, pod.Name)

		sp, ok := seenPods[s]
		if !ok {
@ -405,6 +415,11 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
	// For all seen pods, check all container ports. If they were not covered
	// by one of the service endpoints, generate targets for them.
	for _, pe := range seenPods {
+		// PodIP can be empty when a pod is starting or has been evicted.
+		if len(pe.pod.Status.PodIP) == 0 {
+			continue
+		}
+
		for _, c := range pe.pod.Spec.Containers {
			for _, cport := range c.Ports {
				hasSeenPort := func() bool {
@ -422,21 +437,18 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
					continue
				}

-				// PodIP can be empty when a pod is starting or has been evicted.
-				if len(pe.pod.Status.PodIP) != 0 {
-					a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
-					ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
+				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
+				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

-					target := model.LabelSet{
-						model.AddressLabel:            lv(a),
-						podContainerNameLabel:         lv(c.Name),
-						podContainerImageLabel:        lv(c.Image),
-						podContainerPortNameLabel:     lv(cport.Name),
-						podContainerPortNumberLabel:   lv(ports),
-						podContainerPortProtocolLabel: lv(string(cport.Protocol)),
-					}
-					tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
-				}
+				target := model.LabelSet{
+					model.AddressLabel:            lv(a),
+					podContainerNameLabel:         lv(c.Name),
+					podContainerImageLabel:        lv(c.Image),
+					podContainerPortNameLabel:     lv(cport.Name),
+					podContainerPortNumberLabel:   lv(ports),
+					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+				}
+				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
			}
		}
	}


@ -44,6 +44,7 @@ type endpointSliceEndpointAdaptor interface {
	addresses() []string
	hostname() *string
	nodename() *string
+	zone() *string
	conditions() endpointSliceEndpointConditionsAdaptor
	targetRef() *corev1.ObjectReference
	topology() map[string]string
@ -181,6 +182,10 @@ func (e *endpointSliceEndpointAdaptorV1) nodename() *string {
	return e.endpoint.NodeName
}

+func (e *endpointSliceEndpointAdaptorV1) zone() *string {
+	return e.endpoint.Zone
+}
+
func (e *endpointSliceEndpointAdaptorV1) conditions() endpointSliceEndpointConditionsAdaptor {
	return newEndpointSliceEndpointConditionsAdaptorFromV1(e.endpoint.Conditions)
}
@ -233,6 +238,10 @@ func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
	return e.endpoint.NodeName
}

+func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string {
+	return nil
+}
+
func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
	return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
}


@ -18,6 +18,7 @@ import (
"testing" "testing"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1" v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1" "k8s.io/api/discovery/v1beta1"
@ -79,6 +80,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
DeprecatedTopology: map[string]string{ DeprecatedTopology: map[string]string{
"topology": "value", "topology": "value",
}, },
Zone: strptr("us-east-1a"),
}, { }, {
Addresses: []string{"2.3.4.5"}, Addresses: []string{"2.3.4.5"},
Conditions: v1.EndpointConditions{ Conditions: v1.EndpointConditions{
@ -86,6 +88,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
Serving: boolptr(true), Serving: boolptr(true),
Terminating: boolptr(false), Terminating: boolptr(false),
}, },
Zone: strptr("us-east-1b"),
}, { }, {
Addresses: []string{"3.4.5.6"}, Addresses: []string{"3.4.5.6"},
Conditions: v1.EndpointConditions{ Conditions: v1.EndpointConditions{
@ -93,6 +96,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
Serving: boolptr(true), Serving: boolptr(true),
Terminating: boolptr(true), Terminating: boolptr(true),
}, },
Zone: strptr("us-east-1c"),
}, { }, {
Addresses: []string{"4.5.6.7"}, Addresses: []string{"4.5.6.7"},
Conditions: v1.EndpointConditions{ Conditions: v1.EndpointConditions{
@ -104,6 +108,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
Kind: "Node", Kind: "Node",
Name: "barbaz", Name: "barbaz",
}, },
Zone: strptr("us-east-1a"),
}, },
}, },
} }
@@ -184,8 +189,10 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -196,6 +203,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -206,6 +214,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -218,6 +227,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -451,8 +461,10 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -463,6 +475,7 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -473,6 +486,7 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -485,6 +499,7 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -551,8 +566,10 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -563,6 +580,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -573,6 +591,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -585,6 +604,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -640,8 +660,10 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -652,6 +674,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -662,6 +685,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -674,6 +698,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -724,8 +749,10 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -736,6 +763,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -746,6 +774,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -758,6 +787,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -824,8 +854,10 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -836,6 +868,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
 				"__meta_kubernetes_endpointslice_port_protocol": "TCP",
@@ -846,6 +879,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
 				"__meta_kubernetes_endpointslice_port_protocol": "TCP",
@@ -858,6 +892,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -914,8 +949,10 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -929,6 +966,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -939,6 +977,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -951,6 +990,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1014,8 +1054,10 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1029,6 +1071,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1039,6 +1082,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1051,6 +1095,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1160,8 +1205,10 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1172,6 +1219,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
 				"__meta_kubernetes_endpointslice_port_protocol": "TCP",
@@ -1182,6 +1230,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
 				"__meta_kubernetes_endpointslice_port_protocol": "TCP",
@@ -1194,6 +1243,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1308,8 +1358,10 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
 				"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+				"__meta_kubernetes_endpointslice_endpoint_node_name": "foobar",
 				"__meta_kubernetes_endpointslice_endpoint_topology_present_topology": "true",
 				"__meta_kubernetes_endpointslice_endpoint_topology_topology": "value",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1320,6 +1372,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1b",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
 				"__meta_kubernetes_endpointslice_port_protocol": "TCP",
@@ -1330,6 +1383,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "true",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1c",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
 				"__meta_kubernetes_endpointslice_port_protocol": "TCP",
@@ -1342,6 +1396,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
 				"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_serving": "true",
 				"__meta_kubernetes_endpointslice_endpoint_conditions_terminating": "false",
+				"__meta_kubernetes_endpointslice_endpoint_zone": "us-east-1a",
 				"__meta_kubernetes_endpointslice_port": "9000",
 				"__meta_kubernetes_endpointslice_port_app_protocol": "http",
 				"__meta_kubernetes_endpointslice_port_name": "testport",
@@ -1405,3 +1460,41 @@ func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
 		expectedRes: map[string]*targetgroup.Group{},
 	}.Run(t)
 }
+
+// TestEndpointSliceInfIndexersCount makes sure that RoleEndpointSlice discovery
+// sets up indexing for the main Kube informer only when needed.
+// See: https://github.com/prometheus/prometheus/pull/13554#discussion_r1490965817
+func TestEndpointSliceInfIndexersCount(t *testing.T) {
+	tests := []struct {
+		name             string
+		withNodeMetadata bool
+	}{
+		{"with node metadata", true},
+		{"without node metadata", false},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			var (
+				n                    *Discovery
+				mainInfIndexersCount int
+			)
+			if tc.withNodeMetadata {
+				mainInfIndexersCount = 1
+				n, _ = makeDiscoveryWithMetadata(RoleEndpointSlice, NamespaceDiscovery{}, AttachMetadataConfig{Node: true})
+			} else {
+				n, _ = makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{})
+			}
+
+			k8sDiscoveryTest{
+				discovery: n,
+				afterStart: func() {
+					n.RLock()
+					defer n.RUnlock()
+					require.Len(t, n.discoverers, 1)
+					require.Len(t, n.discoverers[0].(*EndpointSlice).endpointSliceInf.GetIndexer().GetIndexers(), mainInfIndexersCount)
+				},
+			}.Run(t)
+		})
+	}
+}
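Reviewer note: the assertion on GetIndexers() works because client-go informers report exactly the cache.Indexers map they were built with. A standalone sketch (names are illustrative, not from this change) of the counting logic the test relies on:

	package main

	import (
		"fmt"

		"k8s.io/client-go/tools/cache"
	)

	func main() {
		indexers := cache.Indexers{}  // what the discoverer assembles before creating the informer
		attachNodeMetadata := true    // corresponds to attach_metadata.node in the SD config
		if attachNodeMetadata {
			indexers["node"] = func(obj interface{}) ([]string, error) { return nil, nil }
		}
		// An informer constructed with these indexers returns the same map from
		// GetIndexer().GetIndexers(), so its length is 1 here and 0 otherwise.
		fmt.Println(len(indexers))
	}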
@@ -123,12 +123,17 @@ type SDConfig struct {
 	AttachMetadata AttachMetadataConfig `yaml:"attach_metadata,omitempty"`
 }

+// NewDiscovererMetrics implements discovery.Config.
+func (*SDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
+	return newDiscovererMetrics(reg, rmi)
+}
+
 // Name returns the name of the Config.
 func (*SDConfig) Name() string { return "kubernetes" }

 // NewDiscoverer returns a Discoverer for the Config.
 func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
-	return New(opts.Logger, opts.Registerer, c)
+	return New(opts.Logger, opts.Metrics, c)
 }

 // SetDirectory joins any relative file paths with dir.
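Reviewer note: metric construction moves out of New and behind discovery.Config, so callers now instantiate and register metrics before building the discoverer. A hedged sketch of the resulting wiring, written as a helper inside this package (registry, role, and error handling are illustrative):

	func newPodDiscovererSketch() (*Discovery, error) {
		reg := prometheus.NewRegistry()
		refresh := discovery.NewRefreshMetrics(reg)

		conf := &SDConfig{Role: RolePod}
		metrics := conf.NewDiscovererMetrics(reg, refresh)
		if err := metrics.Register(); err != nil {
			return nil, err
		}
		// The caller is responsible for metrics.Unregister() when done.
		return New(log.NewNopLogger(), metrics, conf)
	}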
@@ -265,8 +270,7 @@ type Discovery struct {
 	selectors        roleSelector
 	ownNamespace     string
 	attachMetadata   AttachMetadataConfig
-	eventCount       *prometheus.CounterVec
-	metricRegisterer discovery.MetricRegisterer
+	metrics          *kubernetesMetrics
 }

 func (d *Discovery) getNamespaces() []string {
@@ -285,7 +289,12 @@ func (d *Discovery) getNamespaces() []string {
 }

 // New creates a new Kubernetes discovery for the given role.
-func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, error) {
+func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
+	m, ok := metrics.(*kubernetesMetrics)
+	if !ok {
+		return nil, fmt.Errorf("invalid discovery metrics type")
+	}
+
 	if l == nil {
 		l = log.NewNopLogger()
 	}
@@ -302,7 +311,7 @@ func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, e
 		}
 	case conf.APIServer.URL == nil:
 		// Use the Kubernetes provided pod service account
-		// as described in https://kubernetes.io/docs/admin/service-accounts-admin/
+		// as described in https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#using-official-client-libraries
 		kcfg, err = rest.InClusterConfig()
 		if err != nil {
 			return nil, err
@@ -348,34 +357,7 @@ func New(l log.Logger, reg prometheus.Registerer, conf *SDConfig) (*Discovery, e
 		selectors:          mapSelector(conf.Selectors),
 		ownNamespace:       ownNamespace,
 		attachMetadata:     conf.AttachMetadata,
-		eventCount: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Namespace: discovery.KubernetesMetricsNamespace,
-				Name:      "events_total",
-				Help:      "The number of Kubernetes events handled.",
-			},
-			[]string{"role", "event"},
-		),
-	}
-
-	d.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{d.eventCount})
-
-	// Initialize metric vectors.
-	for _, role := range []string{
-		RoleEndpointSlice.String(),
-		RoleEndpoint.String(),
-		RoleNode.String(),
-		RolePod.String(),
-		RoleService.String(),
-		RoleIngress.String(),
-	} {
-		for _, evt := range []string{
-			MetricLabelRoleAdd,
-			MetricLabelRoleDelete,
-			MetricLabelRoleUpdate,
-		} {
-			d.eventCount.WithLabelValues(role, evt)
-		}
+		metrics:            m,
 	}

 	return d, nil
@@ -415,13 +397,6 @@ const resyncDisabled = 0
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	d.Lock()

-	err := d.metricRegisterer.RegisterMetrics()
-	if err != nil {
-		level.Error(d.logger).Log("msg", "Unable to register metrics", "err", err.Error())
-		return
-	}
-	defer d.metricRegisterer.UnregisterMetrics()
-
 	namespaces := d.getNamespaces()

 	switch d.role {
@@ -510,10 +485,10 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			eps := NewEndpointSlice(
 				log.With(d.logger, "role", "endpointslice"),
 				informer,
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
-				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
+				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointSliceInf.Run(ctx.Done())
@@ -570,10 +545,10 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			eps := NewEndpoints(
 				log.With(d.logger, "role", "endpoint"),
 				d.newEndpointsByNodeInformer(elw),
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
-				cache.NewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
+				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
 				nodeInf,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, eps)
 			go eps.endpointsInf.Run(ctx.Done())
@@ -605,7 +580,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				log.With(d.logger, "role", "pod"),
 				d.newPodsByNodeInformer(plw),
 				nodeInformer,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, pod)
 			go pod.podInf.Run(ctx.Done())
@@ -627,8 +602,8 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			svc := NewService(
 				log.With(d.logger, "role", "service"),
-				cache.NewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
-				d.eventCount,
+				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, svc)
 			go svc.informer.Run(ctx.Done())
@@ -666,7 +641,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 						return i.Watch(ctx, options)
 					},
 				}
-				informer = cache.NewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
+				informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
 			} else {
 				i := d.client.NetworkingV1beta1().Ingresses(namespace)
 				ilw := &cache.ListWatch{
@@ -681,19 +656,19 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 						return i.Watch(ctx, options)
 					},
 				}
-				informer = cache.NewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
+				informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled)
 			}
 			ingress := NewIngress(
 				log.With(d.logger, "role", "ingress"),
 				informer,
-				d.eventCount,
+				d.metrics.eventCount,
 			)
 			d.discoverers = append(d.discoverers, ingress)
 			go ingress.informer.Run(ctx.Done())
 		}
 	case RoleNode:
 		nodeInformer := d.newNodeInformer(ctx)
-		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.eventCount)
+		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount)
 		d.discoverers = append(d.discoverers, node)
 		go node.informer.Run(ctx.Done())
 	default:
@@ -772,7 +747,7 @@ func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer {
 			return d.client.CoreV1().Nodes().Watch(ctx, options)
 		},
 	}
-	return cache.NewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
+	return d.mustNewSharedInformer(nlw, &apiv1.Node{}, resyncDisabled)
 }

 func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
@@ -787,13 +762,28 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
 		}
 	}

-	return cache.NewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
+	return d.mustNewSharedIndexInformer(plw, &apiv1.Pod{}, resyncDisabled, indexers)
 }

 func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.SharedIndexInformer {
 	indexers := make(map[string]cache.IndexFunc)
+	indexers[podIndex] = func(obj interface{}) ([]string, error) {
+		e, ok := obj.(*apiv1.Endpoints)
+		if !ok {
+			return nil, fmt.Errorf("object is not endpoints")
+		}
+		var pods []string
+		for _, target := range e.Subsets {
+			for _, addr := range target.Addresses {
+				if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" {
+					pods = append(pods, namespacedName(addr.TargetRef.Namespace, addr.TargetRef.Name))
+				}
+			}
+		}
+		return pods, nil
+	}
+
 	if !d.attachMetadata.Node {
-		return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
+		return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 	}

 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
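Reviewer note: the new podIndex keys every Endpoints object by the namespace/name of each Pod it targets, so the endpoints discoverer can look up the Endpoints objects for a given pod without scanning the whole store. A sketch of the lookup the index enables, written inside this package (the key value is illustrative):

	func endpointsForPodSketch(informer cache.SharedIndexInformer) ([]*apiv1.Endpoints, error) {
		// ByIndex returns every stored object whose index function emitted this value.
		objs, err := informer.GetIndexer().ByIndex(podIndex, namespacedName("default", "mypod"))
		if err != nil {
			return nil, err
		}
		eps := make([]*apiv1.Endpoints, 0, len(objs))
		for _, obj := range objs {
			if e, ok := obj.(*apiv1.Endpoints); ok {
				eps = append(eps, e)
			}
		}
		return eps, nil
	}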
@@ -819,13 +809,13 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
 		return nodes, nil
 	}

-	return cache.NewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
+	return d.mustNewSharedIndexInformer(plw, &apiv1.Endpoints{}, resyncDisabled, indexers)
 }

 func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object runtime.Object) cache.SharedIndexInformer {
 	indexers := make(map[string]cache.IndexFunc)
 	if !d.attachMetadata.Node {
-		cache.NewSharedIndexInformer(plw, &disv1.EndpointSlice{}, resyncDisabled, indexers)
+		return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers)
 	}

 	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
@@ -864,7 +854,32 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
 		return nodes, nil
 	}

-	return cache.NewSharedIndexInformer(plw, object, resyncDisabled, indexers)
+	return d.mustNewSharedIndexInformer(plw, object, resyncDisabled, indexers)
+}
+
+func (d *Discovery) informerWatchErrorHandler(r *cache.Reflector, err error) {
+	d.metrics.failuresCount.Inc()
+	cache.DefaultWatchErrorHandler(r, err)
+}
+
+func (d *Discovery) mustNewSharedInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) cache.SharedInformer {
+	informer := cache.NewSharedInformer(lw, exampleObject, defaultEventHandlerResyncPeriod)
+	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
+	// Such a scenario would suggest an incorrect use of the API, thus the panic.
+	if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil {
+		panic(err)
+	}
+	return informer
+}
+
+func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+	informer := cache.NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, indexers)
+	// Invoking SetWatchErrorHandler should fail only if the informer has been started beforehand.
+	// Such a scenario would suggest an incorrect use of the API, thus the panic.
+	if err := informer.SetWatchErrorHandler(d.informerWatchErrorHandler); err != nil {
+		panic(err)
+	}
+	return informer
 }

 func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) {
@@ -897,3 +912,7 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta,
 		labelSet[model.LabelName(metaLabelPrefix+string(role)+"_annotationpresent_"+ln)] = presentValue
 	}
 }
+
+func namespacedName(namespace, name string) string {
+	return namespace + "/" + name
+}
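Two reviewer notes on the helpers above. namespacedName deliberately produces the same "namespace/name" format as client-go's cache.MetaNamespaceKeyFunc, so index values line up with store keys. And the mustNew wrappers exist so that every informer the discoverer creates counts watch/list failures before deferring to client-go's default handling; a minimal sketch of that pattern in isolation (the counter name and list-watcher argument are illustrative):

	func countingInformerSketch(lw cache.ListerWatcher, failures prometheus.Counter) cache.SharedInformer {
		inf := cache.NewSharedInformer(lw, &apiv1.Pod{}, 0)
		if err := inf.SetWatchErrorHandler(func(r *cache.Reflector, err error) {
			failures.Inc()                         // count the failure first...
			cache.DefaultWatchErrorHandler(r, err) // ...then keep client-go's logging and backoff behavior
		}); err != nil {
			panic(err) // only fails if the informer was already started
		}
		return inf
	}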
@@ -21,12 +21,16 @@ import (
 	"time"

 	"github.com/go-kit/log"
+	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/stretchr/testify/require"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/apimachinery/pkg/watch"
 	fakediscovery "k8s.io/client-go/discovery/fake"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
+	kubetesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"

 	"github.com/prometheus/client_golang/prometheus"
@@ -51,24 +55,29 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer
 	fakeDiscovery, _ := clientset.Discovery().(*fakediscovery.FakeDiscovery)
 	fakeDiscovery.FakedServerVersion = &version.Info{GitVersion: k8sVer}

+	reg := prometheus.NewRegistry()
+	refreshMetrics := discovery.NewRefreshMetrics(reg)
+	metrics := newDiscovererMetrics(reg, refreshMetrics)
+	err := metrics.Register()
+	if err != nil {
+		panic(err)
+	}
+	// TODO(ptodev): Unregister the metrics at the end of the test.
+
+	kubeMetrics, ok := metrics.(*kubernetesMetrics)
+	if !ok {
+		panic("invalid discovery metrics type")
+	}
+
 	d := &Discovery{
 		client:             clientset,
 		logger:             log.NewNopLogger(),
 		role:               role,
 		namespaceDiscovery: &nsDiscovery,
 		ownNamespace:       "own-ns",
-		eventCount: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Namespace: discovery.KubernetesMetricsNamespace,
-				Name:      "events_total",
-				Help:      "The number of Kubernetes events handled.",
-			},
-			[]string{"role", "event"},
-		),
+		metrics:            kubeMetrics,
 	}

-	d.metricRegisterer = discovery.NewMetricRegisterer(prometheus.NewRegistry(), []prometheus.Collector{d.eventCount})
-
 	return d, clientset
 }
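Reviewer note on the TODO above: once this helper takes a *testing.T (a hypothetical signature change, not part of this diff), the unregistration could presumably be handled with a cleanup hook, along the lines of:

	t.Cleanup(func() {
		metrics.Unregister() // assumes discovery.DiscovererMetrics exposes Unregister
	})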
@@ -123,17 +132,11 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
 	}

 	resChan := make(chan map[string]*targetgroup.Group)
-	go readResultWithTimeout(t, ch, d.expectedMaxItems, time.Second, resChan)
+	go readResultWithTimeout(t, ctx, ch, d.expectedMaxItems, time.Second, resChan)

 	dd, ok := d.discovery.(hasSynced)
-	if !ok {
-		t.Errorf("discoverer does not implement hasSynced interface")
-		return
-	}
-	if !cache.WaitForCacheSync(ctx.Done(), dd.hasSynced) {
-		t.Errorf("discoverer failed to sync: %v", dd)
-		return
-	}
+	require.True(t, ok, "discoverer does not implement hasSynced interface")
+	require.True(t, cache.WaitForCacheSync(ctx.Done(), dd.hasSynced), "discoverer failed to sync: %v", dd)

 	if d.afterStart != nil {
 		d.afterStart()
@@ -142,13 +145,18 @@ func (d k8sDiscoveryTest) Run(t *testing.T) {
 	if d.expectedRes != nil {
 		res := <-resChan
 		requireTargetGroups(t, d.expectedRes, res)
+	} else {
+		// Stop readResultWithTimeout and wait for it.
+		cancel()
+		<-resChan
 	}
 }

 // readResultWithTimeout reads all targetgroups from channel with timeout.
 // It merges targetgroups by source and sends the result to result channel.
-func readResultWithTimeout(t *testing.T, ch <-chan []*targetgroup.Group, max int, timeout time.Duration, resChan chan<- map[string]*targetgroup.Group) {
+func readResultWithTimeout(t *testing.T, ctx context.Context, ch <-chan []*targetgroup.Group, max int, stopAfter time.Duration, resChan chan<- map[string]*targetgroup.Group) {
 	res := make(map[string]*targetgroup.Group)
+	timeout := time.After(stopAfter)
 Loop:
 	for {
 		select {
@ -163,12 +171,15 @@ Loop:
// Reached the max number of target groups we may get; break fast. // Reached the max number of target groups we may get; break fast.
break Loop break Loop
} }
case <-time.After(timeout): case <-timeout:
// Because we use a queue, an object that is created and then // Because we use a queue, an object that is created and then
// deleted or updated may be processed only once. // deleted or updated may be processed only once.
// So we may skip some events and time out here. // So we may skip some events and time out here.
t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max) t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(res), max)
break Loop break Loop
case <-ctx.Done():
t.Logf("stopped, got %d (max: %d) items", len(res), max)
break Loop
} }
} }
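Hoisting timeout := time.After(stopAfter) out of the loop (above) also fixes a subtle re-arming bug: a time.After call inside the select creates a fresh timer on every iteration, so the deadline keeps sliding forward whenever an item arrives. A self-contained illustration, assuming only the standard library:

// drainResetting can run forever under steady traffic: each loop
// iteration arms a brand-new one-second timer.
func drainResetting(ch <-chan int) {
	for {
		select {
		case <-ch:
		case <-time.After(time.Second):
			return
		}
	}
}

// drainWithDeadline stops one second after it starts, regardless of
// traffic, which matches the intent of the change above.
func drainWithDeadline(ch <-chan int) {
	deadline := time.After(time.Second)
	for {
		select {
		case <-ch:
		case <-deadline:
			return
		}
	}
}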
@ -307,3 +318,39 @@ func TestCheckNetworkingV1Supported(t *testing.T) {
}) })
} }
} }
func TestFailuresCountMetric(t *testing.T) {
tests := []struct {
role Role
minFailedWatches int
}{
{RoleNode, 1},
{RolePod, 1},
{RoleService, 1},
{RoleEndpoint, 3},
{RoleEndpointSlice, 3},
{RoleIngress, 1},
}
for _, tc := range tests {
tc := tc
t.Run(string(tc.role), func(t *testing.T) {
t.Parallel()
n, c := makeDiscovery(tc.role, NamespaceDiscovery{})
// The counter is initialized and reports zero failures at the beginning.
require.Equal(t, float64(0), prom_testutil.ToFloat64(n.metrics.failuresCount))
// Simulate an error on watch requests.
c.Discovery().(*fakediscovery.FakeDiscovery).PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) {
return true, nil, apierrors.NewUnauthorized("unauthorized")
})
// Start the discovery.
k8sDiscoveryTest{discovery: n}.Run(t)
// At minimum, the failures of the initial watches should be counted (watches are retried on error).
require.GreaterOrEqual(t, prom_testutil.ToFloat64(n.metrics.failuresCount), float64(tc.minFailedWatches))
})
}
}
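For context on the reactor used in the test above: client-go's fake clientset routes every API call through a chain of reactors, and prepending one intercepts calls ahead of the default tracker behavior. A minimal sketch using the packages this file already imports:

// Make every WATCH on any resource fail with 401 Unauthorized, so a
// discoverer run against this clientset exercises its failure counting.
clientset := fake.NewSimpleClientset()
clientset.PrependWatchReactor("*", func(action kubetesting.Action) (bool, watch.Interface, error) {
	return true, nil, apierrors.NewUnauthorized("unauthorized")
})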

View file

@ -0,0 +1,86 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/prometheus/discovery"
)
var _ discovery.DiscovererMetrics = (*kubernetesMetrics)(nil)
type kubernetesMetrics struct {
eventCount *prometheus.CounterVec
failuresCount prometheus.Counter
metricRegisterer discovery.MetricRegisterer
}
func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
m := &kubernetesMetrics{
eventCount: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: discovery.KubernetesMetricsNamespace,
Name: "events_total",
Help: "The number of Kubernetes events handled.",
},
[]string{"role", "event"},
),
failuresCount: prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: discovery.KubernetesMetricsNamespace,
Name: "failures_total",
Help: "The number of failed WATCH/LIST requests.",
},
),
}
m.metricRegisterer = discovery.NewMetricRegisterer(reg, []prometheus.Collector{
m.eventCount,
m.failuresCount,
})
// Initialize metric vectors.
for _, role := range []string{
RoleEndpointSlice.String(),
RoleEndpoint.String(),
RoleNode.String(),
RolePod.String(),
RoleService.String(),
RoleIngress.String(),
} {
for _, evt := range []string{
MetricLabelRoleAdd,
MetricLabelRoleDelete,
MetricLabelRoleUpdate,
} {
m.eventCount.WithLabelValues(role, evt)
}
}
m.failuresCount.Add(0)
return m
}
// Register implements discovery.DiscovererMetrics.
func (m *kubernetesMetrics) Register() error {
return m.metricRegisterer.RegisterMetrics()
}
// Unregister implements discovery.DiscovererMetrics.
func (m *kubernetesMetrics) Unregister() {
m.metricRegisterer.UnregisterMetrics()
}
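A brief usage sketch of this type with an illustrative registry; nil is passed for the RefreshMetricsInstantiator because this constructor does not use it, and the real increment sites live in the discoverer code elsewhere in this diff:

m := newDiscovererMetrics(prometheus.NewRegistry(), nil).(*kubernetesMetrics)
if err := m.Register(); err != nil {
	panic(err)
}
defer m.Unregister()

// Count one handled add event for the pod role...
m.eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd).Inc()
// ...and one failed WATCH/LIST request.
m.failuresCount.Inc()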

View file

@ -33,7 +33,10 @@ import (
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
const nodeIndex = "node" const (
nodeIndex = "node"
podIndex = "pod"
)
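The new podIndex constant suggests an additional informer index keyed by pod. A hedged sketch of how such an index is typically registered with client-go's cache package (the indexer function body is illustrative, not taken from this diff):

// informer is an assumed cache.SharedIndexInformer.
err := informer.AddIndexers(cache.Indexers{
	podIndex: func(obj interface{}) ([]string, error) {
		pod, ok := obj.(*apiv1.Pod)
		if !ok {
			return nil, nil
		}
		return []string{namespacedName(pod.Namespace, pod.Name)}, nil
	},
})
if err != nil {
	// handle index registration failure
}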
// Pod discovers new pod targets. // Pod discovers new pod targets.
type Pod struct { type Pod struct {
@ -326,7 +329,7 @@ func podSource(pod *apiv1.Pod) string {
} }
func podSourceFromNamespaceAndName(namespace, name string) string { func podSourceFromNamespaceAndName(namespace, name string) string {
return "pod/" + namespace + "/" + name return "pod/" + namespacedName(namespace, name)
} }
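The namespacedName helper referenced above is defined elsewhere in this diff; since podSourceFromNamespaceAndName's output is unchanged, it presumably reduces to the following (a reconstruction, not the actual definition):

// Hypothetical reconstruction of the shared helper.
func namespacedName(namespace, name string) string {
	return namespace + "/" + name
}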
func podReady(pod *apiv1.Pod) model.LabelValue { func podReady(pod *apiv1.Pod) model.LabelValue {

View file

@ -42,7 +42,7 @@ type provider struct {
} }
// NewManager is the Discovery Manager constructor. // NewManager is the Discovery Manager constructor.
func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, options ...func(*Manager)) *Manager { func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]discovery.DiscovererMetrics, options ...func(*Manager)) *Manager {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = log.NewNopLogger()
} }
@ -55,6 +55,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
updatert: 5 * time.Second, updatert: 5 * time.Second,
triggerSend: make(chan struct{}, 1), triggerSend: make(chan struct{}, 1),
registerer: registerer, registerer: registerer,
sdMetrics: sdMetrics,
} }
for _, option := range options { for _, option := range options {
option(mgr) option(mgr)
@ -62,7 +63,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
// Register the metrics. // Register the metrics.
// We have to do this after setting all options, so that the name of the Manager is set. // We have to do this after setting all options, so that the name of the Manager is set.
if metrics, err := discovery.NewMetrics(registerer, mgr.name); err == nil { if metrics, err := discovery.NewManagerMetrics(registerer, mgr.name); err == nil {
mgr.metrics = metrics mgr.metrics = metrics
} else { } else {
level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
@ -108,7 +109,8 @@ type Manager struct {
// A registerer for all service discovery metrics. // A registerer for all service discovery metrics.
registerer prometheus.Registerer registerer prometheus.Registerer
metrics *discovery.Metrics metrics *discovery.Metrics
sdMetrics map[string]discovery.DiscovererMetrics
} }
// Run starts the background processing. // Run starts the background processing.
@ -283,8 +285,8 @@ func (m *Manager) registerProviders(cfgs discovery.Configs, setName string) int
} }
typ := cfg.Name() typ := cfg.Name()
d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{ d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{
Logger: log.With(m.logger, "discovery", typ, "config", setName), Logger: log.With(m.logger, "discovery", typ, "config", setName),
Registerer: m.registerer, Metrics: m.sdMetrics[typ],
}) })
if err != nil { if err != nil {
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
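With the DiscovererOptions change above, a Config implementation now receives its pre-built, per-type metrics instead of a raw Registerer. A hedged sketch of a constructor consuming the new field (names are illustrative, not taken from this diff):

// Hypothetical discoverer constructor under the new options shape.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	m, ok := opts.Metrics.(*kubernetesMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}
	return &Discovery{metrics: m, logger: opts.Logger}, nil
}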

View file

@ -36,6 +36,13 @@ func TestMain(m *testing.M) {
testutil.TolerantVerifyLeak(m) testutil.TolerantVerifyLeak(m)
} }
func newTestMetrics(t *testing.T, reg prometheus.Registerer) (*discovery.RefreshMetricsManager, map[string]discovery.DiscovererMetrics) {
refreshMetrics := discovery.NewRefreshMetrics(reg)
sdMetrics, err := discovery.RegisterSDMetrics(reg, refreshMetrics)
require.NoError(t, err)
return &refreshMetrics, sdMetrics
}
// TestTargetUpdatesOrder checks that the target updates are received in the expected order. // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
func TestTargetUpdatesOrder(t *testing.T) { func TestTargetUpdatesOrder(t *testing.T) {
// The order by which the updates are send is determined by the interval passed to the mock discovery adapter // The order by which the updates are send is determined by the interval passed to the mock discovery adapter
@ -665,7 +672,10 @@ func TestTargetUpdatesOrder(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry()) reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
@ -710,7 +720,7 @@ func staticConfig(addrs ...string) discovery.StaticConfig {
var cfg discovery.StaticConfig var cfg discovery.StaticConfig
for i, addr := range addrs { for i, addr := range addrs {
cfg = append(cfg, &targetgroup.Group{ cfg = append(cfg, &targetgroup.Group{
Source: fmt.Sprint(i), Source: strconv.Itoa(i),
Targets: []model.LabelSet{ Targets: []model.LabelSet{
{model.AddressLabel: model.LabelValue(addr)}, {model.AddressLabel: model.LabelValue(addr)},
}, },
@ -723,7 +733,6 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
t.Helper() t.Helper()
if _, ok := tSets[poolKey]; !ok { if _, ok := tSets[poolKey]; !ok {
t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets) t.Fatalf("'%s' should be present in Pool keys: %v", poolKey, tSets)
return
} }
match := false match := false
@ -748,7 +757,11 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) { func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -777,7 +790,11 @@ func TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {
func TestDiscovererConfigs(t *testing.T) { func TestDiscovererConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -802,7 +819,11 @@ func TestDiscovererConfigs(t *testing.T) {
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) { func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -842,7 +863,11 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
func TestIdenticalConfigurationsAreCoalesced(t *testing.T) { func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, nil, prometheus.NewRegistry())
reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, nil, reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -874,7 +899,11 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
} }
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -897,10 +926,19 @@ func (e errorConfig) NewDiscoverer(discovery.DiscovererOptions) (discovery.Disco
return nil, e.err return nil, e.err
} }
// NewDiscovererMetrics implements discovery.Config.
func (errorConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
return &discovery.NoopDiscovererMetrics{}
}
func TestGaugeFailedConfigs(t *testing.T) { func TestGaugeFailedConfigs(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
discoveryManager := NewManager(ctx, log.NewNopLogger(), prometheus.NewRegistry())
reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1057,7 +1095,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel() defer cancel()
mgr := NewManager(ctx, nil, prometheus.NewRegistry()) reg := prometheus.NewRegistry()
_, sdMetrics := newTestMetrics(t, reg)
mgr := NewManager(ctx, nil, reg, sdMetrics)
require.NotNil(t, mgr) require.NotNil(t, mgr)
mgr.updatert = updateDelay mgr.updatert = updateDelay
go mgr.Run() go mgr.Run()

Some files were not shown because too many files have changed in this diff.