Mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)
Merge remote-tracking branch 'upstream/main' into patch-2

Signed-off-by: Antoine Pultier <antoine.pultier@sintef.no>
Commit: 5c2fd7988b
.github/ISSUE_TEMPLATE/config.yml (2 changed lines)

@@ -1,4 +1,4 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Prometheus Community Support
     url: https://prometheus.io/community/
.github/workflows/buf-lint.yml (4 changed lines)

@@ -12,8 +12,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+      - uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
.github/workflows/buf.yml (4 changed lines)

@@ -12,8 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+      - uses: bufbuild/buf-setup-action@9672cee01808979ea1249f81d6d321217b9a10f6 # v1.47.2
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
.github/workflows/ci.yml (103 changed lines)

@@ -11,11 +11,13 @@ jobs:
     container:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
+        with:
+          enable_npm: true
       - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage

@@ -25,10 +27,10 @@ jobs:
     name: More Go tests
     runs-on: ubuntu-latest
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
       - run: GOARCH=386 go test ./cmd/prometheus

@@ -39,11 +41,14 @@ jobs:
   test_go_oldest:
     name: Go tests with previous Go version
     runs-on: ubuntu-latest
+    env:
+      # Enforce the Go version.
+      GOTOOLCHAIN: local
     container:
       # The go version in this image should be N-1 wrt test_go.
-      image: quay.io/prometheus/golang-builder:1.21-base
+      image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""

@@ -54,11 +59,11 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base

     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false

@@ -74,10 +79,10 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+      - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
         with:
-          go-version: 1.22.x
+          go-version: 1.23.x
       - run: |
           $TestTargets = go list ./... | Where-Object { $_ -NotMatch "(github.com/prometheus/prometheus/discovery.*|github.com/prometheus/prometheus/config|github.com/prometheus/prometheus/web)"}
           go test $TestTargets -vet=off -v

@@ -89,9 +94,9 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     container:
-      image: quay.io/prometheus/golang-builder:1.22-base
+      image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest

@@ -107,6 +112,8 @@ jobs:
     if: |
       !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       &&
+      !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      &&
       !(github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       &&
       !(github.event_name == 'push' && github.event.ref == 'refs/heads/main')

@@ -114,8 +121,8 @@ jobs:
     matrix:
       thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"

@@ -127,6 +134,8 @@ jobs:
     if: |
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
       ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+      ||
       (github.event_name == 'pull_request' && startsWith(github.event.pull_request.base.ref, 'release-'))
       ||
       (github.event_name == 'push' && github.event.ref == 'refs/heads/main')

@@ -137,8 +146,8 @@ jobs:
     # Whenever the Go version is updated here, .promu.yml
     # should also be updated.
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12

@@ -160,12 +169,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
         with:
           cache: false
-          go-version: 1.22.x
+          go-version: 1.23.x
       - name: Run goyacc and check for diff
         run: make install-goyacc check-generated-parser
   golangci:

@@ -173,20 +182,20 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
         with:
-          go-version: 1.22.x
+          go-version: 1.23.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
+        uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.60.2
+          version: v1.62.0
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'

@@ -199,8 +208,8 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -211,10 +220,13 @@ jobs:
     name: Publish release artefacts
     runs-on: ubuntu-latest
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
-    if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+    if: |
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+      ||
+      (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -228,31 +240,40 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: prometheus/promci@45166329da36d74895901808f1c8c97efafc7f84 # v0.3.0
+      - uses: prometheus/promci@52c7012f5f0070d7281b8db4a119e21341d43c91 # v0.4.5
       - name: Install nodejs
-        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
+        uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
+      - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
           restore-keys: |
             ${{ runner.os }}-node-
       - name: Check libraries version
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
-        run: ./scripts/ui_release.sh --check-package "$(echo ${{ github.ref_name }}|sed s/v2/v0/)"
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
+        run: ./scripts/ui_release.sh --check-package "$(./scripts/get_module_version.sh ${{ github.ref_name }})"
       - name: build
         run: make assets
       - name: Copy files before publishing libs
         run: ./scripts/ui_release.sh --copy
       - name: Publish dry-run libraries
-        if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))"
+        if: |
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          &&
+          !(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish dry-run
       - name: Publish libraries
-        if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.')
+        if: |
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v2.'))
+          ||
+          (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
         run: ./scripts/ui_release.sh --publish
         env:
           # The setup-node action writes an .npmrc file with this env variable
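The golangci job above pins golangci-lint v1.62.0 (kept in sync with Makefile.common and scripts/golangci-lint.yml) and invokes it with `--verbose`. A minimal sketch of reproducing that check locally, assuming a golangci-lint binary at a matching version is already installed:

    # Same linter invocation as the CI job; CI pins version v1.62.0.
    golangci-lint run --verbose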
.github/workflows/codeql-analysis.yml (8 changed lines)

@@ -24,15 +24,15 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/init@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/autobuild@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/analyze@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # v3.27.5
.github/workflows/container_description.yml (4 changed lines)

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub

@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name
.github/workflows/fuzzing.yml (2 changed lines)

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
.github/workflows/repo_sync.yml (2 changed lines)

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
.github/workflows/scorecards.yml (6 changed lines)

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: "Checkout code"
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
         with:
           persist-credentials: false

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # tag=v4.3.4
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # tag=v4.4.3
         with:
           name: SARIF file
           path: results.sarif

@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6
+        uses: github/codeql-action/upload-sarif@f09c1c0a94de965c15400f5634aa42fac8fb8f88 # tag=v3.27.5
         with:
           sarif_file: results.sarif
.gitignore (2 changed lines)

@@ -22,7 +22,7 @@ benchmark.txt
 /documentation/examples/remote_storage/example_write_adapter/example_write_adapter

 npm_licenses.tar.bz2
-/web/ui/static/react
+/web/ui/static

 /vendor
 /.build
.golangci.yml

@@ -23,6 +23,7 @@ linters:
     - usestdlibvars
     - whitespace
     - loggercheck
+    - sloglint

 issues:
   max-issues-per-linter: 0

@@ -100,8 +101,6 @@ linters-settings:
       - (net/http.ResponseWriter).Write
       # No need to check for errors on server's shutdown.
       - (*net/http.Server).Shutdown
-      # Never check for logger errors.
-      - (github.com/go-kit/log.Logger).Log
       # Never check for rollback errors as Rollback() is called when a previous error was detected.
       - (github.com/prometheus/prometheus/storage.Appender).Rollback
   goimports:

@@ -110,7 +109,7 @@ linters-settings:
     extra-rules: true
   perfsprint:
     # Optimizes `fmt.Errorf`.
-    errorf: false
+    errorf: true
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly enable all required rules here.

@@ -153,14 +152,4 @@ linters-settings:
     disable:
       - float-compare
       - go-require
-    enable:
-      - bool-compare
-      - compares
-      - empty
-      - error-is-as
-      - error-nil
-      - expected-actual
-      - len
-      - require-error
-      - suite-dont-use-pkg
-      - suite-extra-assert-call
+    enable-all: true
.promu.yml

@@ -1,7 +1,7 @@
 go:
   # Whenever the Go version is updated here,
   # .github/workflows should also be updated.
-  version: 1.22
+  version: 1.23
 repository:
   path: github.com/prometheus/prometheus
 build:

@@ -28,8 +28,6 @@ tarball:
   # Whenever there are new files to include in the tarball,
   # remember to make sure the new files will be generated after `make build`.
   files:
-    - consoles
-    - console_libraries
     - documentation/examples/prometheus.yml
     - LICENSE
     - NOTICE
.yamllint

@@ -1,7 +1,7 @@
 ---
 extends: default
 ignore: |
-  ui/react-app/node_modules
+  **/node_modules

 rules:
   braces:
CHANGELOG.md (125 changed lines)

@@ -2,13 +2,121 @@

 ## unreleased

-* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
-* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
-* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
+* [CHANGE] Notifier: Increment the prometheus_notifications_errors_total metric by the number of affected alerts rather than by one per batch of affected alerts. #15428
+* [ENHANCEMENT] OTLP receiver: Convert also metric metadata. #15416
+
+## 3.0.0 / 2024-11-14
+
+This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users that want to upgrade we recommend to read through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/).
+
+* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376
+* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373
+* [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136
+* [CHANGE] Remote-write: default enable_http2 to false. #15219
+* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164
+* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178
+* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657
+* [CHANGE] Disallow configuring AM with the v1 api. #13883
+* [CHANGE] regexp `.` now matches all characters (performance improvement). #14505
+* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
+* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894
+* [CHANGE] Prometheus will not add or remove port numbers from the target address. `no-default-scrape-port` feature flag removed. #14160
+* [CHANGE] Logging: the format of log lines has changed a little, along with the adoption of Go's Structured Logging package. #14906
+* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion` is enabled. #14738
+* [CHANGE] Float literals and time durations being the same is now a stable fetaure. #15111
+* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
+* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
+* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
+* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
+* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705
+* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
+* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
+* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
+* [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
+* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
+* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384
+* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
+* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
+* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing #15196
+* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
+* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096
+* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082
+* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
+* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
+* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
+* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
+* [ENHANCEMENT] Consul SD: Support catalog filters. #11224
+* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
+* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975
+* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932
+* [PERF] TSDB: Grow postings by doubling. #14721
+* [PERF] Relabeling: Optimize adding a constant label pair. #12180
+* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357
+* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341
+* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
+* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
+* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251
+* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095
+* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
+* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854
+* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884
+* [BUGFIX] PromQL: Unary negation of native histograms. #14821
+* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025
+* [BUGFIX] Autoreload: Reload invalid yaml files. #14947
+* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+
+## 2.53.3 / 2024-11-04
+
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685, #14740
+
+## 2.53.2 / 2024-08-09
+
+Fix a bug where Prometheus would crash with a segmentation fault if a remote-read
+request accessed a block on disk at about the same time as TSDB created a new block.
+
+[BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515,#14523
+
+## 2.55.1 / 2024-11-04
+
+* [BUGFIX] `round()` function did not remove `__name__` label. #15250
+
+## 2.55.0 / 2024-10-22
+
+* [FEATURE] PromQL: Add experimental `info` function. #14495
+* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
+* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
+* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
+* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734
+* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200
+* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
+* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
+* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
+* [FEATURE] TSDB: Add `delayed-compaction` feature flag, for people running many Prometheus to randomize timing. #12532
+* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
+* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
+* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
+* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
+* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655, #14985
+* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
+* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
+* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
+* [ENHANCEMENT] API: Support multiple listening addresses. #14665
+* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120
+* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
+* [BUGFIX] PromQL: make sort_by_label stable. #14985
+* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
+* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
+* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810
+* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766
+* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716
+* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821
+* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042
+
 ## 2.54.1 / 2024-08-27

-* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps (mixing samples of the same series with and without timestamps is still rejected). #14685
 * [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654
 * [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538
 * [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514

@@ -90,6 +198,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 ## 2.52.0 / 2024-05-07

 * [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
+* [CHANGE] Scrape: Multiple samples (even with different timestamps) are treated as duplicates during one scrape.
 * [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
 * [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935
 * [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099

@@ -103,7 +212,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
 * [ENHANCEMENT] TSDB: Pause regular block compactions if the head needs to be compacted (prioritize head as it increases memory consumption). #13754
 * [ENHANCEMENT] Observability: Improved logging during signal handling termination. #13772
 * [ENHANCEMENT] Observability: All log lines for drop series use "num_dropped" key consistently. #13823
-* [ENHANCEMENT] Observability: Log chunk snapshot and mmaped chunk replay duration during WAL replay. #13838
+* [ENHANCEMENT] Observability: Log chunk snapshot and mmapped chunk replay duration during WAL replay. #13838
 * [ENHANCEMENT] Observability: Log if the block is being created from WBL during compaction. #13846
 * [BUGFIX] PromQL: Fix inaccurate sample number statistic when querying histograms. #13667
 * [BUGFIX] PromQL: Fix `histogram_stddev` and `histogram_stdvar` for cases where the histogram has negative buckets. #13852

@@ -640,7 +749,7 @@ The binaries published with this release are built with Go1.17.8 to avoid [CVE-2
 ## 2.33.0 / 2022-01-29

-* [CHANGE] PromQL: Promote negative offset and `@` modifer to stable features. #10121
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
 * [CHANGE] Web: Promote remote-write-receiver to stable. #10119
 * [FEATURE] Config: Add `stripPort` template function. #10002
 * [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045

@@ -877,7 +986,7 @@ This vulnerability has been reported by Aaron Devaney from MDSec.
 * [ENHANCEMENT] Templating: Enable parsing strings in `humanize` functions. #8682
 * [BUGFIX] UI: Provide errors instead of blank page on TSDB Status Page. #8654 #8659
 * [BUGFIX] TSDB: Do not panic when writing very large records to the WAL. #8790
-* [BUGFIX] TSDB: Avoid panic when mmaped memory is referenced after the file is closed. #8723
+* [BUGFIX] TSDB: Avoid panic when mmapped memory is referenced after the file is closed. #8723
 * [BUGFIX] Scaleway Discovery: Fix nil pointer dereference. #8737
 * [BUGFIX] Consul Discovery: Restart no longer required after config update with no targets. #8766

@@ -1803,7 +1912,7 @@ information, read the announcement blog post and migration guide.
 ## 1.7.0 / 2017-06-06

 * [CHANGE] Compress remote storage requests and responses with unframed/raw snappy.
-* [CHANGE] Properly ellide secrets in config.
+* [CHANGE] Properly elide secrets in config.
 * [FEATURE] Add OpenStack service discovery.
 * [FEATURE] Add ability to limit Kubernetes service discovery to certain namespaces.
 * [FEATURE] Add metric for discovered number of Alertmanagers.
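Two of the configuration options named in the changelog above, the per-scrape-config `fallback_scrape_protocol` and the OTLP receiver's `promote_resource_attributes`, live in prometheus.yml. A minimal sketch of setting and validating them with promtool; the file contents and the specific protocol value are illustrative assumptions based only on the option names listed above, not taken from this commit:

    # Hypothetical prometheus.yml fragment exercising the two options, then validated.
    cat > prometheus.yml <<'EOF'
    otlp:
      # Assumed list form: OTel resource attributes promoted to metric labels.
      promote_resource_attributes:
        - service.name
        - service.instance.id
    scrape_configs:
      - job_name: example
        # Assumed value; used when a target returns an invalid or missing Content-Type.
        fallback_scrape_protocol: PrometheusText0.0.4
        static_configs:
          - targets: ["localhost:9100"]
    EOF
    promtool check config prometheus.yml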
Dockerfile (10 changed lines)

@@ -2,27 +2,23 @@ ARG ARCH="amd64"
 ARG OS="linux"
 FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
 LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
+LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"

 ARG ARCH="amd64"
 ARG OS="linux"
 COPY .build/${OS}-${ARCH}/prometheus /bin/prometheus
 COPY .build/${OS}-${ARCH}/promtool /bin/promtool
 COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml
-COPY console_libraries/ /usr/share/prometheus/console_libraries/
-COPY consoles/ /usr/share/prometheus/consoles/
 COPY LICENSE /LICENSE
 COPY NOTICE /NOTICE
 COPY npm_licenses.tar.bz2 /npm_licenses.tar.bz2

 WORKDIR /prometheus
-RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \
-    chown -R nobody:nobody /etc/prometheus /prometheus
+RUN chown -R nobody:nobody /etc/prometheus /prometheus

 USER nobody
 EXPOSE 9090
 VOLUME [ "/prometheus" ]
 ENTRYPOINT [ "/bin/prometheus" ]
 CMD [ "--config.file=/etc/prometheus/prometheus.yml", \
-      "--storage.tsdb.path=/prometheus", \
-      "--web.console.libraries=/usr/share/prometheus/console_libraries", \
-      "--web.console.templates=/usr/share/prometheus/consoles" ]
+      "--storage.tsdb.path=/prometheus" ]
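With the console assets removed from the image and the `--web.console.*` flags dropped from the default CMD, a container built from this Dockerfile only needs a configuration file. A sketch of running the resulting image; the image tag and mount path are illustrative, not taken from this commit:

    # Only the config file is mounted; no console template or library paths are passed any more.
    docker run -p 9090:9090 \
      -v "$PWD/prometheus.yml:/etc/prometheus/prometheus.yml" \
      prom/prometheus:latest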
MAINTAINERS.md

@@ -2,7 +2,6 @@

 General maintainers:
 * Bryan Boreham (bjboreham@gmail.com / @bboreham)
-* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
 * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
 * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)

@@ -17,9 +16,8 @@ Maintainers for specific parts of the codebase:
   George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( <npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank ( <alex.greenbank@grafana.com> / @alexgreenbank)
-  * `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
+  * `otlptranslator`: Arthur Silva Sens (<arthursens2005@gmail.com> / @ArthurSens), Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
-* `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
   * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
Makefile (39 changed lines)

@@ -30,6 +30,11 @@ include Makefile.common

 DOCKER_IMAGE_NAME ?= prometheus

+# Only build UI if PREBUILT_ASSETS_STATIC_DIR is not set
+ifdef PREBUILT_ASSETS_STATIC_DIR
+  SKIP_UI_BUILD = true
+endif
+
 .PHONY: update-npm-deps
 update-npm-deps:
         @echo ">> updating npm dependencies"

@@ -42,13 +47,17 @@ upgrade-npm-deps:

 .PHONY: ui-bump-version
 ui-bump-version:
-        version=$$(sed s/2/0/ < VERSION) && ./scripts/ui_release.sh --bump-version "$${version}"
+        version=$$(./scripts/get_module_version.sh) && ./scripts/ui_release.sh --bump-version "$${version}"
         cd web/ui && npm install
         git add "./web/ui/package-lock.json" "./**/package.json"

 .PHONY: ui-install
 ui-install:
         cd $(UI_PATH) && npm install
+        # The old React app has been separated from the npm workspaces setup to avoid
+        # issues with conflicting dependencies. This is a temporary solution until the
+        # new Mantine-based UI is fully integrated and the old app can be removed.
+        cd $(UI_PATH)/react-app && npm install

 .PHONY: ui-build
 ui-build:

@@ -65,10 +74,30 @@ ui-test:
 .PHONY: ui-lint
 ui-lint:
         cd $(UI_PATH) && npm run lint
+        # The old React app has been separated from the npm workspaces setup to avoid
+        # issues with conflicting dependencies. This is a temporary solution until the
+        # new Mantine-based UI is fully integrated and the old app can be removed.
+        cd $(UI_PATH)/react-app && npm run lint

 .PHONY: assets
+ifndef SKIP_UI_BUILD
 assets: ui-install ui-build

+.PHONY: npm_licenses
+npm_licenses: ui-install
+        @echo ">> bundling npm licenses"
+        rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
+        ln -s . npm_licenses
+        find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
+        rm -f npm_licenses
+else
+assets:
+        @echo '>> skipping assets build, pre-built assets provided'
+
+npm_licenses:
+        @echo '>> skipping assets npm licenses, pre-built assets provided'
+endif

 .PHONY: assets-compress
 assets-compress: assets
         @echo '>> compressing assets'

@@ -117,14 +146,6 @@ else
 test: check-generated-parser common-test ui-build-module ui-test ui-lint check-go-mod-version
 endif

-.PHONY: npm_licenses
-npm_licenses: ui-install
-        @echo ">> bundling npm licenses"
-        rm -f $(REACT_APP_NPM_LICENSES_TARBALL) npm_licenses
-        ln -s . npm_licenses
-        find npm_licenses/$(UI_NODE_MODULES_PATH) -iname "license*" | tar cfj $(REACT_APP_NPM_LICENSES_TARBALL) --files-from=-
-        rm -f npm_licenses

 .PHONY: tarball
 tarball: npm_licenses common-tarball
@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
||||||
SKIP_GOLANGCI_LINT :=
|
SKIP_GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT :=
|
GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT_OPTS ?=
|
GOLANGCI_LINT_OPTS ?=
|
||||||
GOLANGCI_LINT_VERSION ?= v1.60.2
|
GOLANGCI_LINT_VERSION ?= v1.62.0
|
||||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||||
# windows isn't included here because of the path separator being different.
|
# windows isn't included here because of the path separator being different.
|
||||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||||
|
@ -275,3 +275,9 @@ $(1)_precheck:
|
||||||
exit 1; \
|
exit 1; \
|
||||||
fi
|
fi
|
||||||
endef
|
endef
|
||||||
|
|
||||||
|
govulncheck: install-govulncheck
|
||||||
|
govulncheck ./...
|
||||||
|
|
||||||
|
install-govulncheck:
|
||||||
|
command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|
19
README.md
19
README.md
|
@ -115,7 +115,7 @@ The Makefile provides several targets:
|
||||||
|
|
||||||
Prometheus is bundled with many service discovery plugins.
|
Prometheus is bundled with many service discovery plugins.
|
||||||
When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
|
When building Prometheus from source, you can edit the [plugins.yml](./plugins.yml)
|
||||||
file to disable some service discoveries. The file is a yaml-formated list of go
|
file to disable some service discoveries. The file is a yaml-formatted list of go
|
||||||
import path that will be built into the Prometheus binary.
|
import path that will be built into the Prometheus binary.
|
||||||
|
|
||||||
After you have changed the file, you
|
After you have changed the file, you
|
||||||
|
@ -158,8 +158,19 @@ This is experimental.
|
||||||
### Prometheus code base
|
### Prometheus code base
|
||||||
|
|
||||||
In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
|
In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
|
||||||
Prometheus release number do not exactly match Go module releases. For the
|
Prometheus release number do not exactly match Go module releases.
|
||||||
Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.
|
|
||||||
|
For the
|
||||||
|
Prometheus v3.y.z releases, we are publishing equivalent v0.3y.z tags. The y in v0.3y.z is always padded to two digits, with a leading zero if needed.
|
||||||
|
|
||||||
|
Therefore, a user that would want to use Prometheus v3.0.0 as a library could do:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
go get github.com/prometheus/prometheus@v0.300.0
|
||||||
|
```
|
||||||
|
|
||||||
|
For the
|
||||||
|
Prometheus v2.y.z releases, we published the equivalent v0.y.z tags.
|
||||||
|
|
||||||
Therefore, a user that would want to use Prometheus v2.35.0 as a library could do:
|
Therefore, a user that would want to use Prometheus v2.35.0 as a library could do:
|
||||||
|
|
||||||
|
@ -177,7 +188,7 @@ For more information on building, running, and developing on the React-based UI,
|
||||||
|
|
||||||
## More information
|
## More information
|
||||||
|
|
||||||
* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.
|
* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v3.y.z will be displayed as v0.3y.z (the y in v0.3y.z is always padded to two digits, with a leading zero if needed), while v2.y.z will be displayed as v0.y.z.
|
||||||
* See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.
|
* See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.
|
||||||
|
|
||||||
## Contributing
|
## Contributing
|
||||||
|
|
|
@ -59,6 +59,7 @@ Release cadence of first pre-releases being cut is 6 weeks.
|
||||||
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
|
| v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
|
||||||
| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
|
| v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
|
||||||
| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
|
| v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
|
||||||
|
| v2.55 | 2024-09-17 | Bryan Boreham (GitHub: @bboreham) |
|
||||||
|
|
||||||
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
|
If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
|
||||||
|
|
||||||
|
@ -187,7 +188,7 @@ the Prometheus server, we use major version zero releases for the libraries.
|
||||||
Tag the new library release via the following commands:
|
Tag the new library release via the following commands:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
tag="v$(sed s/2/0/ < VERSION)"
|
tag="v$(./scripts/get_module_version.sh)"
|
||||||
git tag -s "${tag}" -m "${tag}"
|
git tag -s "${tag}" -m "${tag}"
|
||||||
git push origin "${tag}"
|
git push origin "${tag}"
|
||||||
```
|
```
|
||||||
|
|
File diff suppressed because it is too large
Load diff
|
@ -31,9 +31,9 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alecthomas/kingpin/v2"
|
"github.com/alecthomas/kingpin/v2"
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/config"
|
"github.com/prometheus/prometheus/config"
|
||||||
|
@ -42,6 +42,11 @@ import (
|
||||||
"github.com/prometheus/prometheus/rules"
|
"github.com/prometheus/prometheus/rules"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// This can be removed when the default validation scheme in common is updated.
|
||||||
|
model.NameValidationScheme = model.UTF8Validation
|
||||||
|
}
|
||||||
|
|
||||||
const startupTime = 10 * time.Second
|
const startupTime = 10 * time.Second
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -120,6 +125,7 @@ func TestFailedStartupExitCode(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
fakeInputFile := "fake-input-file"
|
fakeInputFile := "fake-input-file"
|
||||||
expectedExitStatus := 2
|
expectedExitStatus := 2
|
||||||
|
@ -206,9 +212,36 @@ func TestWALSegmentSizeBounds(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
|
for _, tc := range []struct {
|
||||||
prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
|
size string
|
||||||
|
exitCode int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
size: "9MB",
|
||||||
|
exitCode: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
size: "257MB",
|
||||||
|
exitCode: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
size: "10",
|
||||||
|
exitCode: 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
size: "1GB",
|
||||||
|
exitCode: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
size: "12MB",
|
||||||
|
exitCode: 0,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.size, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
|
||||||
|
|
||||||
// Log stderr in case of failure.
|
// Log stderr in case of failure.
|
||||||
stderr, err := prom.StderrPipe()
|
stderr, err := prom.StderrPipe()
|
||||||
|
@ -221,7 +254,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
|
||||||
err = prom.Start()
|
err = prom.Start()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if expectedExitStatus == 0 {
|
if tc.exitCode == 0 {
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
go func() { done <- prom.Wait() }()
|
go func() { done <- prom.Wait() }()
|
||||||
select {
|
select {
|
||||||
|
@ -231,7 +264,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
|
||||||
prom.Process.Kill()
|
prom.Process.Kill()
|
||||||
<-done
|
<-done
|
||||||
}
|
}
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = prom.Wait()
|
err = prom.Wait()
|
||||||
|
@ -239,19 +272,33 @@ func TestWALSegmentSizeBounds(t *testing.T) {
|
||||||
var exitError *exec.ExitError
|
var exitError *exec.ExitError
|
||||||
require.ErrorAs(t, err, &exitError)
|
require.ErrorAs(t, err, &exitError)
|
||||||
status := exitError.Sys().(syscall.WaitStatus)
|
status := exitError.Sys().(syscall.WaitStatus)
|
||||||
require.Equal(t, expectedExitStatus, status.ExitStatus())
|
require.Equal(t, tc.exitCode, status.ExitStatus())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
|
func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
|
for _, tc := range []struct {
|
||||||
prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
|
size string
|
||||||
|
exitCode int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
size: "512KB",
|
||||||
|
exitCode: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
size: "1MB",
|
||||||
|
exitCode: 0,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.size, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
|
||||||
|
|
||||||
// Log stderr in case of failure.
|
// Log stderr in case of failure.
|
||||||
stderr, err := prom.StderrPipe()
|
stderr, err := prom.StderrPipe()
|
||||||
|
@ -264,7 +311,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
|
||||||
err = prom.Start()
|
err = prom.Start()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
if expectedExitStatus == 0 {
|
if tc.exitCode == 0 {
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
go func() { done <- prom.Wait() }()
|
go func() { done <- prom.Wait() }()
|
||||||
select {
|
select {
|
||||||
|
@ -274,7 +321,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
|
||||||
prom.Process.Kill()
|
prom.Process.Kill()
|
||||||
<-done
|
<-done
|
||||||
}
|
}
|
||||||
continue
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = prom.Wait()
|
err = prom.Wait()
|
||||||
|
@ -282,7 +329,8 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
|
||||||
var exitError *exec.ExitError
|
var exitError *exec.ExitError
|
||||||
require.ErrorAs(t, err, &exitError)
|
require.ErrorAs(t, err, &exitError)
|
||||||
status := exitError.Sys().(syscall.WaitStatus)
|
status := exitError.Sys().(syscall.WaitStatus)
|
||||||
require.Equal(t, expectedExitStatus, status.ExitStatus())
|
require.Equal(t, tc.exitCode, status.ExitStatus())
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -290,7 +338,7 @@ func TestTimeMetrics(t *testing.T) {
|
||||||
tmpDir := t.TempDir()
|
tmpDir := t.TempDir()
|
||||||
|
|
||||||
reg := prometheus.NewRegistry()
|
reg := prometheus.NewRegistry()
|
||||||
db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil)
|
db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer func() {
|
defer func() {
|
||||||
require.NoError(t, db.Close())
|
require.NoError(t, db.Close())
|
||||||
|
@ -348,7 +396,9 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAgentSuccessfulStartup(t *testing.T) {
|
func TestAgentSuccessfulStartup(t *testing.T) {
|
||||||
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
|
t.Parallel()
|
||||||
|
|
||||||
|
prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
|
||||||
require.NoError(t, prom.Start())
|
require.NoError(t, prom.Start())
|
||||||
|
|
||||||
actualExitStatus := 0
|
actualExitStatus := 0
|
||||||
|
@ -366,7 +416,9 @@ func TestAgentSuccessfulStartup(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAgentFailedStartupWithServerFlag(t *testing.T) {
|
func TestAgentFailedStartupWithServerFlag(t *testing.T) {
|
||||||
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
|
t.Parallel()
|
||||||
|
|
||||||
|
prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
|
||||||
|
|
||||||
output := bytes.Buffer{}
|
output := bytes.Buffer{}
|
||||||
prom.Stderr = &output
|
prom.Stderr = &output
|
||||||
|
@ -393,7 +445,9 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
|
func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
|
||||||
prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
|
t.Parallel()
|
||||||
|
|
||||||
|
prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
|
||||||
require.NoError(t, prom.Start())
|
require.NoError(t, prom.Start())
|
||||||
|
|
||||||
actualExitStatus := 0
|
actualExitStatus := 0
|
||||||
|
@ -414,6 +468,7 @@ func TestModeSpecificFlags(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
testcases := []struct {
|
testcases := []struct {
|
||||||
mode string
|
mode string
|
||||||
|
@ -428,10 +483,11 @@ func TestModeSpecificFlags(t *testing.T) {
|
||||||
|
|
||||||
for _, tc := range testcases {
|
for _, tc := range testcases {
|
||||||
t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
|
t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
|
args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
|
||||||
|
|
||||||
if tc.mode == "agent" {
|
if tc.mode == "agent" {
|
||||||
args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
|
args = append(args, "--agent", "--config.file="+agentConfig)
|
||||||
} else {
|
} else {
|
||||||
args = append(args, "--config.file="+promConfig)
|
args = append(args, "--config.file="+promConfig)
|
||||||
}
|
}
|
||||||
|
@ -479,6 +535,8 @@ func TestDocumentation(t *testing.T) {
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" {
|
||||||
t.SkipNow()
|
t.SkipNow()
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
@ -503,6 +561,8 @@ func TestDocumentation(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRwProtoMsgFlagParser(t *testing.T) {
|
func TestRwProtoMsgFlagParser(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
defaultOpts := config.RemoteWriteProtoMsgs{
|
defaultOpts := config.RemoteWriteProtoMsgs{
|
||||||
config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
|
config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
|
||||||
}
|
}
|
||||||
|
|
|
@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
|
port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
|
||||||
|
|
||||||
|
|
|
@ -125,12 +125,61 @@ func (p *queryLogTest) query(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 200, r.StatusCode)
|
require.Equal(t, 200, r.StatusCode)
|
||||||
case ruleOrigin:
|
case ruleOrigin:
|
||||||
time.Sleep(2 * time.Second)
|
// Poll the /api/v1/rules endpoint until a new rule evaluation is detected.
|
||||||
|
var lastEvalTime time.Time
|
||||||
|
for {
|
||||||
|
r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
rulesBody, err := io.ReadAll(r.Body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer r.Body.Close()
|
||||||
|
|
||||||
|
// Parse the rules response to find the last evaluation time.
|
||||||
|
newEvalTime := parseLastEvaluation(rulesBody)
|
||||||
|
if newEvalTime.After(lastEvalTime) {
|
||||||
|
if !lastEvalTime.IsZero() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
lastEvalTime = newEvalTime
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
panic("can't query this origin")
|
panic("can't query this origin")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response.
|
||||||
|
func parseLastEvaluation(rulesBody []byte) time.Time {
|
||||||
|
var ruleResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Data struct {
|
||||||
|
Groups []struct {
|
||||||
|
Rules []struct {
|
||||||
|
LastEvaluation string `json:"lastEvaluation"`
|
||||||
|
} `json:"rules"`
|
||||||
|
} `json:"groups"`
|
||||||
|
} `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
|
err := json.Unmarshal(rulesBody, &ruleResponse)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, group := range ruleResponse.Data.Groups {
|
||||||
|
for _, rule := range group.Rules {
|
||||||
|
if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil {
|
||||||
|
return evalTime
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return time.Time{}
|
||||||
|
}
|
||||||
|
|
||||||
// queryString returns the expected queryString of a this test.
|
// queryString returns the expected queryString of a this test.
|
||||||
func (p *queryLogTest) queryString() string {
|
func (p *queryLogTest) queryString() string {
|
||||||
switch p.origin {
|
switch p.origin {
|
||||||
|
@ -322,7 +371,7 @@ func (p *queryLogTest) run(t *testing.T) {
|
||||||
if p.exactQueryCount() {
|
if p.exactQueryCount() {
|
||||||
require.Len(t, ql, qc)
|
require.Len(t, ql, qc)
|
||||||
} else {
|
} else {
|
||||||
require.Greater(t, len(ql), qc, "no queries logged")
|
require.GreaterOrEqual(t, len(ql), qc, "no queries logged")
|
||||||
}
|
}
|
||||||
p.validateLastQuery(t, ql)
|
p.validateLastQuery(t, ql)
|
||||||
qc = len(ql)
|
qc = len(ql)
|
||||||
|
@ -353,7 +402,7 @@ func (p *queryLogTest) run(t *testing.T) {
|
||||||
if p.exactQueryCount() {
|
if p.exactQueryCount() {
|
||||||
require.Len(t, ql, qc)
|
require.Len(t, ql, qc)
|
||||||
} else {
|
} else {
|
||||||
require.Greater(t, len(ql), qc, "no queries logged")
|
require.GreaterOrEqual(t, len(ql), qc, "no queries logged")
|
||||||
}
|
}
|
||||||
p.validateLastQuery(t, ql)
|
p.validateLastQuery(t, ql)
|
||||||
|
|
||||||
|
@ -393,6 +442,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine {
|
||||||
file, err := os.Open(path)
|
file, err := os.Open(path)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
scanner := bufio.NewScanner(file)
|
scanner := bufio.NewScanner(file)
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
var q queryLogLine
|
var q queryLogLine
|
||||||
|
@ -406,6 +456,7 @@ func TestQueryLog(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
cwd, err := os.Getwd()
|
cwd, err := os.Getwd()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -424,6 +475,7 @@ func TestQueryLog(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run(p.String(), func(t *testing.T) {
|
t.Run(p.String(), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
p.run(t)
|
p.run(t)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
229
cmd/prometheus/reload_test.go
Normal file
229
cmd/prometheus/reload_test.go
Normal file
|
@ -0,0 +1,229 @@
|
||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/util/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
const configReloadMetric = "prometheus_config_last_reload_successful"
|
||||||
|
|
||||||
|
func TestAutoReloadConfig_ValidToValid(t *testing.T) {
|
||||||
|
steps := []struct {
|
||||||
|
configText string
|
||||||
|
expectedInterval string
|
||||||
|
expectedMetric float64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
configText: `
|
||||||
|
global:
|
||||||
|
scrape_interval: 30s
|
||||||
|
`,
|
||||||
|
expectedInterval: "30s",
|
||||||
|
expectedMetric: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
configText: `
|
||||||
|
global:
|
||||||
|
scrape_interval: 15s
|
||||||
|
`,
|
||||||
|
expectedInterval: "15s",
|
||||||
|
expectedMetric: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
configText: `
|
||||||
|
global:
|
||||||
|
scrape_interval: 30s
|
||||||
|
`,
|
||||||
|
expectedInterval: "30s",
|
||||||
|
expectedMetric: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
runTestSteps(t, steps)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) {
|
||||||
|
steps := []struct {
|
||||||
|
configText string
|
||||||
|
expectedInterval string
|
||||||
|
expectedMetric float64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
configText: `
|
||||||
|
global:
|
||||||
|
scrape_interval: 30s
|
||||||
|
`,
|
||||||
|
expectedInterval: "30s",
|
||||||
|
expectedMetric: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
configText: `
|
||||||
|
global:
|
||||||
|
scrape_interval: 15s
|
||||||
|
invalid_syntax
|
||||||
|
`,
|
||||||
|
expectedInterval: "30s",
|
||||||
|
expectedMetric: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
configText: `
|
||||||
|
global:
|
||||||
|
scrape_interval: 30s
|
||||||
|
`,
|
||||||
|
expectedInterval: "30s",
|
||||||
|
expectedMetric: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
runTestSteps(t, steps)
|
||||||
|
}
|
||||||
|
|
||||||
|
func runTestSteps(t *testing.T, steps []struct {
|
||||||
|
configText string
|
||||||
|
expectedInterval string
|
||||||
|
expectedMetric float64
|
||||||
|
},
|
||||||
|
) {
|
||||||
|
configDir := t.TempDir()
|
||||||
|
configFilePath := filepath.Join(configDir, "prometheus.yml")
|
||||||
|
|
||||||
|
t.Logf("Config file path: %s", configFilePath)
|
||||||
|
|
||||||
|
require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file")
|
||||||
|
|
||||||
|
port := testutil.RandomUnprivilegedPort(t)
|
||||||
|
runPrometheusWithLogging(t, configFilePath, port)
|
||||||
|
|
||||||
|
baseURL := "http://localhost:" + strconv.Itoa(port)
|
||||||
|
require.Eventually(t, func() bool {
|
||||||
|
resp, err := http.Get(baseURL + "/-/ready")
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
return resp.StatusCode == http.StatusOK
|
||||||
|
}, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time")
|
||||||
|
|
||||||
|
for i, step := range steps {
|
||||||
|
t.Logf("Step %d", i)
|
||||||
|
require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step")
|
||||||
|
|
||||||
|
require.Eventually(t, func() bool {
|
||||||
|
return verifyScrapeInterval(t, baseURL, step.expectedInterval) &&
|
||||||
|
verifyConfigReloadMetric(t, baseURL, step.expectedMetric)
|
||||||
|
}, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool {
|
||||||
|
resp, err := http.Get(baseURL + "/api/v1/status/config")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
config := struct {
|
||||||
|
Data struct {
|
||||||
|
YAML string `json:"yaml"`
|
||||||
|
} `json:"data"`
|
||||||
|
}{}
|
||||||
|
|
||||||
|
require.NoError(t, json.Unmarshal(body, &config))
|
||||||
|
return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval)
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool {
|
||||||
|
resp, err := http.Get(baseURL + "/metrics")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
lines := string(body)
|
||||||
|
var actualValue float64
|
||||||
|
found := false
|
||||||
|
|
||||||
|
for _, line := range strings.Split(lines, "\n") {
|
||||||
|
if strings.HasPrefix(line, configReloadMetric) {
|
||||||
|
parts := strings.Fields(line)
|
||||||
|
if len(parts) >= 2 {
|
||||||
|
actualValue, err = strconv.ParseFloat(parts[1], 64)
|
||||||
|
require.NoError(t, err)
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return found && actualValue == expectedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func captureLogsToTLog(t *testing.T, r io.Reader) {
|
||||||
|
scanner := bufio.NewScanner(r)
|
||||||
|
for scanner.Scan() {
|
||||||
|
t.Log(scanner.Text())
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
t.Logf("Error reading logs: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) {
|
||||||
|
stdoutPipe, stdoutWriter := io.Pipe()
|
||||||
|
stderrPipe, stderrWriter := io.Pipe()
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
|
||||||
|
prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port))
|
||||||
|
prom.Stdout = stdoutWriter
|
||||||
|
prom.Stderr = stderrWriter
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
captureLogsToTLog(t, stdoutPipe)
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
captureLogsToTLog(t, stderrPipe)
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
prom.Process.Kill()
|
||||||
|
prom.Wait()
|
||||||
|
stdoutWriter.Close()
|
||||||
|
stderrWriter.Close()
|
||||||
|
wg.Wait()
|
||||||
|
})
|
||||||
|
|
||||||
|
require.NoError(t, prom.Start())
|
||||||
|
}
|
193
cmd/prometheus/scrape_failure_log_test.go
Normal file
193
cmd/prometheus/scrape_failure_log_test.go
Normal file
|
@ -0,0 +1,193 @@
|
||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/atomic"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/util/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestScrapeFailureLogFile(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("skipping test in short mode.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tracks the number of requests made to the mock server.
|
||||||
|
var requestCount atomic.Int32
|
||||||
|
|
||||||
|
// Starts a server that always returns HTTP 500 errors.
|
||||||
|
mockServerAddress := startGarbageServer(t, &requestCount)
|
||||||
|
|
||||||
|
// Create a temporary directory for Prometheus configuration and logs.
|
||||||
|
tempDir := t.TempDir()
|
||||||
|
|
||||||
|
// Define file paths for the scrape failure log and Prometheus configuration.
|
||||||
|
// Like other files, the scrape failure log file should be relative to the
|
||||||
|
// config file. Therefore, we split the name we put in the file and the full
|
||||||
|
// path used to check the content of the file.
|
||||||
|
scrapeFailureLogFileName := "scrape_failure.log"
|
||||||
|
scrapeFailureLogFile := filepath.Join(tempDir, scrapeFailureLogFileName)
|
||||||
|
promConfigFile := filepath.Join(tempDir, "prometheus.yml")
|
||||||
|
|
||||||
|
// Step 1: Set up an initial Prometheus configuration that globally
|
||||||
|
// specifies a scrape failure log file.
|
||||||
|
promConfig := fmt.Sprintf(`
|
||||||
|
global:
|
||||||
|
scrape_interval: 500ms
|
||||||
|
scrape_failure_log_file: %s
|
||||||
|
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: 'test_job'
|
||||||
|
static_configs:
|
||||||
|
- targets: ['%s']
|
||||||
|
`, scrapeFailureLogFileName, mockServerAddress)
|
||||||
|
|
||||||
|
err := os.WriteFile(promConfigFile, []byte(promConfig), 0o644)
|
||||||
|
require.NoError(t, err, "Failed to write Prometheus configuration file")
|
||||||
|
|
||||||
|
// Start Prometheus with the generated configuration and a random port, enabling the lifecycle API.
|
||||||
|
port := testutil.RandomUnprivilegedPort(t)
|
||||||
|
params := []string{
|
||||||
|
"-test.main",
|
||||||
|
"--config.file=" + promConfigFile,
|
||||||
|
"--storage.tsdb.path=" + filepath.Join(tempDir, "data"),
|
||||||
|
fmt.Sprintf("--web.listen-address=127.0.0.1:%d", port),
|
||||||
|
"--web.enable-lifecycle",
|
||||||
|
}
|
||||||
|
prometheusProcess := exec.Command(promPath, params...)
|
||||||
|
prometheusProcess.Stdout = os.Stdout
|
||||||
|
prometheusProcess.Stderr = os.Stderr
|
||||||
|
|
||||||
|
err = prometheusProcess.Start()
|
||||||
|
require.NoError(t, err, "Failed to start Prometheus")
|
||||||
|
defer prometheusProcess.Process.Kill()
|
||||||
|
|
||||||
|
// Wait until the mock server receives at least two requests from Prometheus.
|
||||||
|
require.Eventually(t, func() bool {
|
||||||
|
return requestCount.Load() >= 2
|
||||||
|
}, 30*time.Second, 500*time.Millisecond, "Expected at least two requests to the mock server")
|
||||||
|
|
||||||
|
// Verify that the scrape failures have been logged to the specified file.
|
||||||
|
content, err := os.ReadFile(scrapeFailureLogFile)
|
||||||
|
require.NoError(t, err, "Failed to read scrape failure log")
|
||||||
|
require.Contains(t, string(content), "server returned HTTP status 500 Internal Server Error", "Expected scrape failure log entry not found")
|
||||||
|
|
||||||
|
// Step 2: Update the Prometheus configuration to remove the scrape failure
|
||||||
|
// log file setting.
|
||||||
|
promConfig = fmt.Sprintf(`
|
||||||
|
global:
|
||||||
|
scrape_interval: 1s
|
||||||
|
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: 'test_job'
|
||||||
|
static_configs:
|
||||||
|
- targets: ['%s']
|
||||||
|
`, mockServerAddress)
|
||||||
|
|
||||||
|
err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644)
|
||||||
|
require.NoError(t, err, "Failed to update Prometheus configuration file")
|
||||||
|
|
||||||
|
// Reload Prometheus with the updated configuration.
|
||||||
|
reloadPrometheus(t, port)
|
||||||
|
|
||||||
|
// Count the number of lines in the scrape failure log file before any
|
||||||
|
// further requests.
|
||||||
|
preReloadLogLineCount := countLinesInFile(scrapeFailureLogFile)
|
||||||
|
|
||||||
|
// Wait for at least two more requests to the mock server to ensure
|
||||||
|
// Prometheus continues scraping.
|
||||||
|
requestsBeforeReload := requestCount.Load()
|
||||||
|
require.Eventually(t, func() bool {
|
||||||
|
return requestCount.Load() >= requestsBeforeReload+2
|
||||||
|
}, 30*time.Second, 500*time.Millisecond, "Expected two more requests to the mock server after configuration reload")
|
||||||
|
|
||||||
|
// Ensure that no new lines were added to the scrape failure log file after
|
||||||
|
// the configuration change.
|
||||||
|
require.Equal(t, preReloadLogLineCount, countLinesInFile(scrapeFailureLogFile), "No new lines should be added to the scrape failure log file after removing the log setting")
|
||||||
|
|
||||||
|
// Step 3: Re-add the scrape failure log file setting, but this time under
|
||||||
|
// scrape_configs, and reload Prometheus.
|
||||||
|
promConfig = fmt.Sprintf(`
|
||||||
|
global:
|
||||||
|
scrape_interval: 1s
|
||||||
|
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: 'test_job'
|
||||||
|
scrape_failure_log_file: %s
|
||||||
|
static_configs:
|
||||||
|
- targets: ['%s']
|
||||||
|
`, scrapeFailureLogFileName, mockServerAddress)
|
||||||
|
|
||||||
|
err = os.WriteFile(promConfigFile, []byte(promConfig), 0o644)
|
||||||
|
require.NoError(t, err, "Failed to update Prometheus configuration file")
|
||||||
|
|
||||||
|
// Reload Prometheus with the updated configuration.
|
||||||
|
reloadPrometheus(t, port)
|
||||||
|
|
||||||
|
// Wait for at least two more requests to the mock server and verify that
|
||||||
|
// new log entries are created.
|
||||||
|
postReloadLogLineCount := countLinesInFile(scrapeFailureLogFile)
|
||||||
|
requestsBeforeReAddingLog := requestCount.Load()
|
||||||
|
require.Eventually(t, func() bool {
|
||||||
|
return requestCount.Load() >= requestsBeforeReAddingLog+2
|
||||||
|
}, 30*time.Second, 500*time.Millisecond, "Expected two additional requests after re-adding the log setting")
|
||||||
|
|
||||||
|
// Confirm that new lines were added to the scrape failure log file.
|
||||||
|
require.Greater(t, countLinesInFile(scrapeFailureLogFile), postReloadLogLineCount, "New lines should be added to the scrape failure log file after re-adding the log setting")
|
||||||
|
}
|
||||||
|
|
||||||
|
// reloadPrometheus sends a reload request to the Prometheus server to apply
|
||||||
|
// updated configurations.
|
||||||
|
func reloadPrometheus(t *testing.T, port int) {
|
||||||
|
resp, err := http.Post(fmt.Sprintf("http://127.0.0.1:%d/-/reload", port), "", nil)
|
||||||
|
require.NoError(t, err, "Failed to reload Prometheus")
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode, "Unexpected status code when reloading Prometheus")
|
||||||
|
}
|
||||||
|
|
||||||
|
// startGarbageServer sets up a mock server that returns a 500 Internal Server Error
|
||||||
|
// for all requests. It also increments the request count each time it's hit.
|
||||||
|
func startGarbageServer(t *testing.T, requestCount *atomic.Int32) string {
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
requestCount.Inc()
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
}))
|
||||||
|
t.Cleanup(server.Close)
|
||||||
|
|
||||||
|
parsedURL, err := url.Parse(server.URL)
|
||||||
|
require.NoError(t, err, "Failed to parse mock server URL")
|
||||||
|
|
||||||
|
return parsedURL.Host
|
||||||
|
}
|
||||||
|
|
||||||
|
// countLinesInFile counts and returns the number of lines in the specified file.
|
||||||
|
func countLinesInFile(filePath string) int {
|
||||||
|
data, err := os.ReadFile(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return 0 // Return 0 if the file doesn't exist or can't be read.
|
||||||
|
}
|
||||||
|
return bytes.Count(data, []byte{'\n'})
|
||||||
|
}
|
|
@ -34,8 +34,8 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errNotNativeHistogram = fmt.Errorf("not a native histogram")
|
errNotNativeHistogram = errors.New("not a native histogram")
|
||||||
errNotEnoughData = fmt.Errorf("not enough data")
|
errNotEnoughData = errors.New("not enough data")
|
||||||
|
|
||||||
outputHeader = `Bucket stats for each histogram series over time
|
outputHeader = `Bucket stats for each histogram series over time
|
||||||
------------------------------------------------
|
------------------------------------------------
|
||||||
|
@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
|
||||||
|
|
||||||
matrix, ok := values.(model.Matrix)
|
matrix, ok := values.(model.Matrix)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
|
return nil, errors.New("query of buckets resulted in non-Matrix")
|
||||||
}
|
}
|
||||||
|
|
||||||
return matrix, nil
|
return matrix, nil
|
||||||
|
@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
|
||||||
prev := matrix[i].Values[timeIdx]
|
prev := matrix[i].Values[timeIdx]
|
||||||
// Assume the results are nicely aligned.
|
// Assume the results are nicely aligned.
|
||||||
if curr.Timestamp != prev.Timestamp {
|
if curr.Timestamp != prev.Timestamp {
|
||||||
return counts, fmt.Errorf("matrix result is not time aligned")
|
return counts, errors.New("matrix result is not time aligned")
|
||||||
}
|
}
|
||||||
counts[i+1] = int(curr.Value - prev.Value)
|
counts[i+1] = int(curr.Value - prev.Value)
|
||||||
}
|
}
|
||||||
|
|
|
@ -109,6 +109,7 @@ func init() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestGetBucketCountsAtTime(t *testing.T) {
|
func TestGetBucketCountsAtTime(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
matrix model.Matrix
|
matrix model.Matrix
|
||||||
length int
|
length int
|
||||||
|
@ -137,6 +138,7 @@ func TestGetBucketCountsAtTime(t *testing.T) {
|
||||||
|
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
|
t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
|
res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, c.expected, res)
|
require.Equal(t, c.expected, res)
|
||||||
|
@ -145,6 +147,7 @@ func TestGetBucketCountsAtTime(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCalcClassicBucketStatistics(t *testing.T) {
|
func TestCalcClassicBucketStatistics(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
matrix model.Matrix
|
matrix model.Matrix
|
||||||
expected *statistics
|
expected *statistics
|
||||||
|
@ -162,6 +165,7 @@ func TestCalcClassicBucketStatistics(t *testing.T) {
|
||||||
|
|
||||||
for i, c := range cases {
|
for i, c := range cases {
|
||||||
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
|
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
res, err := calcClassicBucketStatistics(c.matrix)
|
res, err := calcClassicBucketStatistics(c.matrix)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, c.expected, res)
|
require.Equal(t, c.expected, res)
|
||||||
|
|
|
@ -21,9 +21,10 @@ import (
|
||||||
"math"
|
"math"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/oklog/ulid"
|
"github.com/oklog/ulid"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/model/labels"
|
"github.com/prometheus/prometheus/model/labels"
|
||||||
"github.com/prometheus/prometheus/model/textparse"
|
"github.com/prometheus/prometheus/model/textparse"
|
||||||
"github.com/prometheus/prometheus/tsdb"
|
"github.com/prometheus/prometheus/tsdb"
|
||||||
|
@ -48,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
|
||||||
|
|
||||||
_, ts, _ := p.Series()
|
_, ts, _ := p.Series()
|
||||||
if ts == nil {
|
if ts == nil {
|
||||||
return 0, 0, fmt.Errorf("expected timestamp for series got none")
|
return 0, 0, errors.New("expected timestamp for series got none")
|
||||||
}
|
}
|
||||||
|
|
||||||
if *ts > maxt {
|
if *ts > maxt {
|
||||||
|
@ -120,7 +121,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
|
||||||
// also need to append samples throughout the whole block range. To allow that, we
|
// also need to append samples throughout the whole block range. To allow that, we
|
||||||
// pretend that the block is twice as large here, but only really add sample in the
|
// pretend that the block is twice as large here, but only really add sample in the
|
||||||
// original interval later.
|
// original interval later.
|
||||||
w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration)
|
w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("block writer: %w", err)
|
return fmt.Errorf("block writer: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -86,6 +86,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBackfill(t *testing.T) {
|
func TestBackfill(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
ToParse string
|
ToParse string
|
||||||
IsOk bool
|
IsOk bool
|
||||||
|
@ -729,6 +730,7 @@ after_eof 1 2
|
||||||
}
|
}
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
t.Run(test.Description, func(t *testing.T) {
|
t.Run(test.Description, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
t.Logf("Test:%s", test.Description)
|
t.Logf("Test:%s", test.Description)
|
||||||
|
|
||||||
outputDir := t.TempDir()
|
outputDir := t.TempDir()
|
||||||
|
|
|
@ -32,13 +32,13 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alecthomas/kingpin/v2"
|
"github.com/alecthomas/kingpin/v2"
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/google/pprof/profile"
|
"github.com/google/pprof/profile"
|
||||||
"github.com/prometheus/client_golang/api"
|
"github.com/prometheus/client_golang/api"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/client_golang/prometheus/testutil/promlint"
|
"github.com/prometheus/client_golang/prometheus/testutil/promlint"
|
||||||
config_util "github.com/prometheus/common/config"
|
config_util "github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"github.com/prometheus/common/version"
|
"github.com/prometheus/common/version"
|
||||||
"github.com/prometheus/exporter-toolkit/web"
|
"github.com/prometheus/exporter-toolkit/web"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
@ -58,10 +58,16 @@ import (
|
||||||
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
|
_ "github.com/prometheus/prometheus/plugins" // Register plugins.
|
||||||
"github.com/prometheus/prometheus/promql/parser"
|
"github.com/prometheus/prometheus/promql/parser"
|
||||||
"github.com/prometheus/prometheus/promql/promqltest"
|
"github.com/prometheus/prometheus/promql/promqltest"
|
||||||
|
"github.com/prometheus/prometheus/rules"
|
||||||
"github.com/prometheus/prometheus/scrape"
|
"github.com/prometheus/prometheus/scrape"
|
||||||
"github.com/prometheus/prometheus/util/documentcli"
|
"github.com/prometheus/prometheus/util/documentcli"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// This can be removed when the default validation scheme in common is updated.
|
||||||
|
model.NameValidationScheme = model.UTF8Validation
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
successExitCode = 0
|
successExitCode = 0
|
||||||
failureExitCode = 1
|
failureExitCode = 1
|
||||||
|
@ -211,6 +217,7 @@ func main() {
|
||||||
"test-rule-file",
|
"test-rule-file",
|
||||||
"The unit test file.",
|
"The unit test file.",
|
||||||
).Required().ExistingFiles()
|
).Required().ExistingFiles()
|
||||||
|
testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool()
|
||||||
testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()
|
testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()
|
||||||
|
|
||||||
defaultDBPath := "data/"
|
defaultDBPath := "data/"
|
||||||
|
@ -236,14 +243,14 @@ func main() {
|
||||||
|
|
||||||
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
|
tsdbDumpCmd := tsdbCmd.Command("dump", "Dump samples from a TSDB.")
|
||||||
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
|
dumpPath := tsdbDumpCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
|
||||||
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
|
dumpSandboxDirRoot := tsdbDumpCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
|
||||||
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
|
dumpMinTime := tsdbDumpCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
|
||||||
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
|
dumpMaxTime := tsdbDumpCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
|
||||||
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
|
dumpMatch := tsdbDumpCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
|
||||||
|
|
||||||
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
|
tsdbDumpOpenMetricsCmd := tsdbCmd.Command("dump-openmetrics", "[Experimental] Dump samples from a TSDB into OpenMetrics text format, excluding native histograms and staleness markers, which are not representable in OpenMetrics.")
|
||||||
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
|
dumpOpenMetricsPath := tsdbDumpOpenMetricsCmd.Arg("db path", "Database path (default is "+defaultDBPath+").").Default(defaultDBPath).String()
|
||||||
dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory would be created in case WAL replay generates chunks. The sandbox directory is cleaned up at the end.").Default(defaultDBPath).String()
|
dumpOpenMetricsSandboxDirRoot := tsdbDumpOpenMetricsCmd.Flag("sandbox-dir-root", "Root directory where a sandbox directory will be created, this sandbox is used in case WAL replay generates chunks (default is the database path). The sandbox is cleaned up at the end.").String()
|
||||||
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
|
dumpOpenMetricsMinTime := tsdbDumpOpenMetricsCmd.Flag("min-time", "Minimum timestamp to dump.").Default(strconv.FormatInt(math.MinInt64, 10)).Int64()
|
||||||
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
|
dumpOpenMetricsMaxTime := tsdbDumpOpenMetricsCmd.Flag("max-time", "Maximum timestamp to dump.").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
|
||||||
dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()
|
 	dumpOpenMetricsMatch := tsdbDumpOpenMetricsCmd.Flag("match", "Series selector. Can be specified multiple times.").Default("{__name__=~'(?s:.*)'}").Strings()

@@ -286,7 +293,7 @@ func main() {
 	promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
 	promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()

-	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+	featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings()

 	documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()

@@ -316,26 +323,21 @@ func main() {
 		}
 	}

-	var noDefaultScrapePort bool
 	for _, f := range *featureList {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "no-default-scrape-port":
-				noDefaultScrapePort = true
 			case "":
 				continue
-			case "promql-at-modifier", "promql-negative-offset":
-				fmt.Printf(" WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o)
 			default:
-				fmt.Printf(" WARNING: Unknown option for --enable-feature: %q\n", o)
+				fmt.Printf(" WARNING: --enable-feature is currently a no-op")
 			}
 		}
 	}

 	switch parsedCmd {
 	case sdCheckCmd.FullCommand():
-		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer))
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))

 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))

@@ -391,6 +393,7 @@ func main() {
 			},
 			*testRulesRun,
 			*testRulesDiff,
+			*testRulesDebug,
 			*testRulesFiles...),
 		)

@@ -441,7 +444,7 @@ func checkExperimental(f bool) {
 	}
 }

-var errLint = fmt.Errorf("lint error")
+var errLint = errors.New("lint error")

 type lintConfig struct {
 	all bool

@@ -575,7 +578,7 @@ func checkFileExists(fn string) error {
 func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
 	fmt.Println("Checking", filename)

-	cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger())
+	cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger())
 	if err != nil {
 		return nil, err
 	}

@@ -895,30 +898,30 @@ func compare(a, b compareRuleType) int {

 func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
 	var duplicates []compareRuleType
-	var rules compareRuleTypes
+	var cRules compareRuleTypes

 	for _, group := range groups {
 		for _, rule := range group.Rules {
-			rules = append(rules, compareRuleType{
+			cRules = append(cRules, compareRuleType{
 				metric: ruleMetric(rule),
-				label:  labels.FromMap(rule.Labels),
+				label:  rules.FromMaps(group.Labels, rule.Labels),
 			})
 		}
 	}
-	if len(rules) < 2 {
+	if len(cRules) < 2 {
 		return duplicates
 	}
-	sort.Sort(rules)
+	sort.Sort(cRules)

-	last := rules[0]
-	for i := 1; i < len(rules); i++ {
-		if compare(last, rules[i]) == 0 {
+	last := cRules[0]
+	for i := 1; i < len(cRules); i++ {
+		if compare(last, cRules[i]) == 0 {
 			// Don't add a duplicated rule multiple times.
 			if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 {
-				duplicates = append(duplicates, rules[i])
+				duplicates = append(duplicates, cRules[i])
 			}
 		}
-		last = rules[i]
+		last = cRules[i]
 	}

 	return duplicates

@@ -1182,7 +1185,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
 		return fmt.Errorf("new api client error: %w", err)
 	}

-	ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api)
+	ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api)
 	errs := ruleImporter.loadGroups(ctx, files)
 	for _, err := range errs {
 		if err != nil {

@@ -1216,7 +1219,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, tg := range targetGroups {
 		var failures []error
-		targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb)
+		targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb)
 		if len(failures) > 0 {
 			first := failures[0]
 			return first
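Note: a recurring cleanup in the hunks above replaces `fmt.Errorf` with `errors.New` wherever the message contains no formatting verbs. A minimal sketch of the distinction, assuming a hypothetical `check` helper (only `errLint` itself comes from the diff):

```go
package main

import (
	"errors"
	"fmt"
)

// Static message: errors.New avoids parsing a format string for nothing.
var errLint = errors.New("lint error")

func check(name string) error {
	// Dynamic message: fmt.Errorf is still the right tool when values are
	// interpolated or a cause is wrapped with %w.
	return fmt.Errorf("checking %q: %w", name, errLint)
}

func main() {
	fmt.Println(errors.Is(check("rules.yml"), errLint)) // true
}
```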
@@ -31,12 +31,19 @@ import (
 	"testing"
 	"time"

+	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/rulefmt"
+	"github.com/prometheus/prometheus/promql/promqltest"
 )

+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 var promtoolPath = os.Args[0]

 func TestMain(m *testing.M) {

@@ -53,6 +60,7 @@ func TestMain(m *testing.M) {
 }

 func TestQueryRange(t *testing.T) {
+	t.Parallel()
 	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`)
 	defer s.Close()

@@ -76,6 +84,7 @@ func TestQueryRange(t *testing.T) {
 }

 func TestQueryInstant(t *testing.T) {
+	t.Parallel()
 	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`)
 	defer s.Close()

@@ -107,6 +116,7 @@ func mockServer(code int, body string) (*httptest.Server, func() *http.Request)
 }

 func TestCheckSDFile(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -137,9 +147,10 @@ func TestCheckSDFile(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			_, err := checkSDFile(test.file)
 			if test.err != "" {
-				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)

@@ -148,6 +159,7 @@ func TestCheckSDFile(t *testing.T) {
 }

 func TestCheckDuplicates(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name     string
 		ruleFile string

@@ -172,6 +184,7 @@ func TestCheckDuplicates(t *testing.T) {
 	for _, test := range cases {
 		c := test
 		t.Run(c.name, func(t *testing.T) {
+			t.Parallel()
 			rgs, err := rulefmt.ParseFile(c.ruleFile)
 			require.Empty(t, err)
 			dups := checkDuplicates(rgs.Groups)

@@ -191,6 +204,7 @@ func BenchmarkCheckDuplicates(b *testing.B) {
 }

 func TestCheckTargetConfig(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -219,9 +233,10 @@ func TestCheckTargetConfig(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			_, err := checkConfig(false, "testdata/"+test.file, false)
 			if test.err != "" {
-				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)

@@ -230,6 +245,7 @@ func TestCheckTargetConfig(t *testing.T) {
 }

 func TestCheckConfigSyntax(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -302,13 +318,14 @@ func TestCheckConfigSyntax(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			_, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
 			expectedErrMsg := test.err
 			if strings.Contains(runtime.GOOS, "windows") {
 				expectedErrMsg = test.errWindows
 			}
 			if expectedErrMsg != "" {
-				require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)

@@ -317,6 +334,7 @@ func TestCheckConfigSyntax(t *testing.T) {
 }

 func TestAuthorizationConfig(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -336,9 +354,10 @@ func TestAuthorizationConfig(t *testing.T) {

 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			_, err := checkConfig(false, "testdata/"+test.file, false)
 			if test.err != "" {
-				require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error())
+				require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)

@@ -350,6 +369,7 @@ func TestCheckMetricsExtended(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		t.Skip("Skipping on windows")
 	}
+	t.Parallel()

 	f, err := os.Open("testdata/metrics-test.prom")
 	require.NoError(t, err)

@@ -386,6 +406,7 @@ func TestExitCodes(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()

 	for _, c := range []struct {
 		file string

@@ -410,8 +431,10 @@ func TestExitCodes(t *testing.T) {
 		},
 	} {
 		t.Run(c.file, func(t *testing.T) {
+			t.Parallel()
 			for _, lintFatal := range []bool{true, false} {
 				t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
+					t.Parallel()
 					args := []string{"-test.main", "check", "config", "testdata/" + c.file}
 					if lintFatal {
 						args = append(args, "--lint-fatal")

@@ -442,6 +465,7 @@ func TestDocumentation(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		t.SkipNow()
 	}
+	t.Parallel()
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()

@@ -535,17 +559,65 @@ func TestCheckRules(t *testing.T) {

 func TestCheckRulesWithRuleFiles(t *testing.T) {
 	t.Run("rules-good", func(t *testing.T) {
+		t.Parallel()
 		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
 		require.Equal(t, successExitCode, exitCode, "")
 	})

 	t.Run("rules-bad", func(t *testing.T) {
+		t.Parallel()
 		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
 		require.Equal(t, failureExitCode, exitCode, "")
 	})

 	t.Run("rules-lint-fatal", func(t *testing.T) {
+		t.Parallel()
 		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
 		require.Equal(t, lintErrExitCode, exitCode, "")
 	})
 }

+func TestTSDBDumpCommand(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+	t.Parallel()
+
+	storage := promqltest.LoadedStorage(t, `
+	load 1m
+		metric{foo="bar"} 1 2 3
+	`)
+	t.Cleanup(func() { storage.Close() })
+
+	for _, c := range []struct {
+		name           string
+		subCmd         string
+		sandboxDirRoot string
+	}{
+		{
+			name:   "dump",
+			subCmd: "dump",
+		},
+		{
+			name:           "dump with sandbox dir root",
+			subCmd:         "dump",
+			sandboxDirRoot: t.TempDir(),
+		},
+		{
+			name:   "dump-openmetrics",
+			subCmd: "dump-openmetrics",
+		},
+		{
+			name:           "dump-openmetrics with sandbox dir root",
+			subCmd:         "dump-openmetrics",
+			sandboxDirRoot: t.TempDir(),
+		},
+	} {
+		t.Run(c.name, func(t *testing.T) {
+			t.Parallel()
+			args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()}
+			cmd := exec.Command(promtoolPath, args...)
+			require.NoError(t, cmd.Run())
+		})
+	}
+}
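Note: the test hunks above swap `require.Equalf(t, test.err, err.Error(), ...)` for `require.EqualErrorf(t, err, test.err, ...)` and `require.Contains(t, err.Error(), ...)` for `require.ErrorContains(t, err, ...)`. The error-aware helpers report a normal failure instead of panicking when `err` is unexpectedly nil. A small sketch with an invented error value, not code from the repository:

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorAssertions(t *testing.T) {
	err := errors.New("parsing YAML file foo.yml: field bar not found")

	// Calling err.Error() directly would panic if err were nil;
	// the Error* helpers fail the test cleanly instead.
	require.EqualError(t, err, "parsing YAML file foo.yml: field bar not found")
	require.ErrorContains(t, err, "field bar not found")
}
```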
@@ -16,12 +16,12 @@ package main
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"time"

-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"

 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"

@@ -38,7 +38,7 @@ type queryRangeAPI interface {
 }

 type ruleImporter struct {
-	logger log.Logger
+	logger *slog.Logger
 	config ruleImporterConfig

 	apiClient queryRangeAPI

@@ -57,8 +57,8 @@ type ruleImporterConfig struct {

 // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series
 // written to disk in blocks.
-func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
-	level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
+func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
+	logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
 	return &ruleImporter{
 		logger: logger,
 		config: config,

@@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string)
 // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks.
 func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
 	for name, group := range importer.groups {
-		level.Info(importer.logger).Log("backfiller", "processing group", "name", name)
+		importer.logger.Info("processing group", "component", "backfiller", "name", name)

 		for i, r := range group.Rules() {
-			level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name())
+			importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name())
 			if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil {
 				errs = append(errs, err)
 			}

@@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 		return fmt.Errorf("query range: %w", err)
 	}
 	if warnings != nil {
-		level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings)
+		importer.logger.Warn("Range query returned warnings.", "warnings", warnings)
 	}

 	// To prevent races with compaction, a block writer only allows appending samples

@@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	// also need to append samples throughout the whole block range. To allow that, we
 	// pretend that the block is twice as large here, but only really add sample in the
 	// original interval later.
-	w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
+	w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
 	if err != nil {
 		return fmt.Errorf("new block writer: %w", err)
 	}
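Note: rules.go is part of the wider migration in this commit from `github.com/go-kit/log` to the standard library's `log/slog`, with loggers constructed through `github.com/prometheus/common/promslog`. A rough sketch of the call style, based only on the calls visible in the diff (the group name is a placeholder):

```go
package main

import "github.com/prometheus/common/promslog"

func main() {
	// promslog.New returns a *slog.Logger (standard library log/slog).
	logger := promslog.New(&promslog.Config{})

	// Old go-kit style: level.Info(logger).Log("backfiller", "processing group", "name", name)
	// New slog style: the level is a method and the message comes first.
	logger.Info("processing group", "component", "backfiller", "name", "example-group")

	// Tests and quiet paths swap log.NewNopLogger() for promslog.NewNopLogger().
	_ = promslog.NewNopLogger()
}
```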
@@ -21,9 +21,9 @@ import (
 	"testing"
 	"time"

-	"github.com/go-kit/log"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/model/labels"

@@ -43,6 +43,7 @@ const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Mil

 // TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together.
 func TestBackfillRuleIntegration(t *testing.T) {
+	t.Parallel()
 	const (
 		testMaxSampleCount = 50
 		testValue          = 123

@@ -72,6 +73,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 	}
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			tmpDir := t.TempDir()
 			ctx := context.Background()

@@ -161,7 +163,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 }

 func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
-	logger := log.NewNopLogger()
+	logger := promslog.NewNopLogger()
 	cfg := ruleImporterConfig{
 		outputDir: tmpDir,
 		start:     start.Add(-10 * time.Hour),

@@ -210,6 +212,7 @@ func createMultiRuleTestFiles(path string) error {
 // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
 // received from Prometheus Query API, including the __name__ label.
 func TestBackfillLabels(t *testing.T) {
+	t.Parallel()
 	tmpDir := t.TempDir()
 	ctx := context.Background()

@@ -251,6 +254,7 @@ func TestBackfillLabels(t *testing.T) {
 	require.NoError(t, err)

 	t.Run("correct-labels", func(t *testing.T) {
+		t.Parallel()
 		selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 		for selectedSeries.Next() {
 			series := selectedSeries.At()
@@ -20,9 +20,9 @@ import (
 	"os"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/google/go-cmp/cmp"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/promslog"

 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"

@@ -38,10 +38,10 @@ type sdCheckResult struct {
 }

 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int {
-	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
+	logger := promslog.New(&promslog.Config{})

-	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
+	cfg, err := config.LoadFile(sdConfigFiles, false, logger)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "Cannot load config", err)
 		return failureExitCode

@@ -114,7 +114,7 @@ outerLoop:
 	}
 	results := []sdCheckResult{}
 	for _, tgs := range sdCheckResults {
-		results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
+		results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
 	}

 	res, err := json.MarshalIndent(results, "", " ")

@@ -127,7 +127,7 @@ outerLoop:
 	return successExitCode
 }

-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
 	sdCheckResults := []sdCheckResult{}
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, targetGroup := range targetGroups {

@@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
 		}
 	}

-	res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
+	res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
 	result := sdCheckResult{
 		DiscoveredLabels: orig,
 		Labels:           res,
@@ -29,6 +29,7 @@ import (
 )

 func TestSDCheckResult(t *testing.T) {
+	t.Parallel()
 	targetGroups := []*targetgroup.Group{{
 		Targets: []model.LabelSet{
 			map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"},

@@ -70,5 +71,5 @@ func TestSDCheckResult(t *testing.T) {
 		},
 	}

-	testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
+	testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
 }
@@ -6,7 +6,7 @@ scrape_configs:
 alerting:
   alertmanagers:
     - scheme: http
-      api_version: v1
+      api_version: v2
      file_sd_configs:
        - files:
            - nonexistent_file.yml
14 cmd/promtool/testdata/unittest.yml vendored

@@ -69,13 +69,13 @@ tests:
       eval_time: 2m
       exp_samples:
         - labels: "test_histogram_repeat"
-          histogram: "{{count:2 sum:3 buckets:[2]}}"
+          histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}"

     - expr: test_histogram_increase
       eval_time: 2m
       exp_samples:
         - labels: "test_histogram_increase"
-          histogram: "{{count:4 sum:5.6 buckets:[4]}}"
+          histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}"

     # Ensure a value is stale as soon as it is marked as such.
     - expr: test_stale

@@ -89,11 +89,11 @@ tests:

     # Ensure lookback delta is respected, when a value is missing.
     - expr: timestamp(test_missing)
-      eval_time: 5m
+      eval_time: 4m59s
       exp_samples:
         - value: 0
     - expr: timestamp(test_missing)
-      eval_time: 5m1s
+      eval_time: 5m
       exp_samples: []

   # Minimal test case to check edge case of a single sample.

@@ -113,7 +113,7 @@ tests:
     - expr: count_over_time(fixed_data[1h])
       eval_time: 1h
       exp_samples:
-        - value: 61
+        - value: 60
     - expr: timestamp(fixed_data)
       eval_time: 1h
       exp_samples:

@@ -183,7 +183,7 @@ tests:
     - expr: job:test:count_over_time1m
      eval_time: 1m
      exp_samples:
-        - value: 61
+        - value: 60
          labels: 'job:test:count_over_time1m{job="test"}'
     - expr: timestamp(job:test:count_over_time1m)
      eval_time: 1m10s

@@ -194,7 +194,7 @@ tests:
     - expr: job:test:count_over_time1m
      eval_time: 2m
      exp_samples:
-        - value: 61
+        - value: 60
          labels: 'job:test:count_over_time1m{job="test"}'
     - expr: timestamp(job:test:count_over_time1m)
      eval_time: 2m59s999ms
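Note: the expected values in unittest.yml drop from 61 to 60 and the lookback-delta probe moves from 5m to 4m59s. This is consistent with range selectors becoming left-open (exclusive of the left boundary), so a `[1h]` window evaluated at t=1h over 1m-spaced samples starting at t=0 no longer includes the sample sitting exactly on the left edge. A tiny illustrative count, not code from the repository:

```go
package main

import "fmt"

func main() {
	// Samples at t = 0m, 1m, ..., 60m; the window is (0m, 60m] when the
	// range selector excludes its left boundary.
	count := 0
	for ts := 0; ts <= 60; ts++ {
		if ts > 0 { // left-open: the sample exactly one hour back is excluded
			count++
		}
	}
	fmt.Println(count) // 60 (it would be 61 with a closed left boundary)
}
```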
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"runtime"

@@ -32,9 +33,10 @@ import (
 	"time"

 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"go.uber.org/atomic"

+	"github.com/prometheus/common/promslog"
+
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"

@@ -60,7 +62,7 @@ type writeBenchmark struct {
 	memprof   *os.File
 	blockprof *os.File
 	mtxprof   *os.File
-	logger    log.Logger
+	logger    *slog.Logger
 }

 func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error {

@@ -68,7 +70,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 		outPath:     outPath,
 		samplesFile: samplesFile,
 		numMetrics:  numMetrics,
-		logger:      log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)),
+		logger:      promslog.New(&promslog.Config{}),
 	}
 	if b.outPath == "" {
 		dir, err := os.MkdirTemp("", "tsdb_bench")

@@ -87,9 +89,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err

 	dir := filepath.Join(b.outPath, "storage")

-	l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-
-	st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
+	st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{
 		RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
 		MinBlockDuration:  int64(2 * time.Hour / time.Millisecond),
 	}, tsdb.NewDBStats())

@@ -367,25 +367,25 @@ func printBlocks(blocks []tsdb.BlockReader, writeHeader, humanReadable bool) {
 		fmt.Fprintf(tw,
 			"%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
 			meta.ULID,
-			getFormatedTime(meta.MinTime, humanReadable),
-			getFormatedTime(meta.MaxTime, humanReadable),
+			getFormattedTime(meta.MinTime, humanReadable),
+			getFormattedTime(meta.MaxTime, humanReadable),
 			time.Duration(meta.MaxTime-meta.MinTime)*time.Millisecond,
 			meta.Stats.NumSamples,
 			meta.Stats.NumChunks,
 			meta.Stats.NumSeries,
-			getFormatedBytes(b.Size(), humanReadable),
+			getFormattedBytes(b.Size(), humanReadable),
 		)
 	}
 }

-func getFormatedTime(timestamp int64, humanReadable bool) string {
+func getFormattedTime(timestamp int64, humanReadable bool) string {
 	if humanReadable {
 		return time.Unix(timestamp/1000, 0).UTC().String()
 	}
 	return strconv.FormatInt(timestamp, 10)
 }

-func getFormatedBytes(bytes int64, humanReadable bool) string {
+func getFormattedBytes(bytes int64, humanReadable bool) string {
 	if humanReadable {
 		return units.Base2Bytes(bytes).String()
 	}

@@ -405,7 +405,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 		}
 	}

-	b, err := db.Block(blockID)
+	b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory)
 	if err != nil {
 		return nil, nil, err
 	}

@@ -589,7 +589,10 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
 	if err != nil {
 		return err
 	}
+	// Only intersect postings if matchers are specified.
+	if len(matchers) > 0 {
 		postings = index.Intersect(postings, index.NewListPostings(refs))
+	}
 	count := 0
 	for postings.Next() {
 		count++

@@ -662,7 +665,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 			histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
 			fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
 			if !ok {
-				return fmt.Errorf("chunk is not FloatHistogramChunk")
+				return errors.New("chunk is not FloatHistogramChunk")
 			}
 			it := fhchk.Iterator(nil)
 			bucketCount := 0

@@ -677,7 +680,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 			histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
 			hchk, ok := chk.(*chunkenc.HistogramChunk)
 			if !ok {
-				return fmt.Errorf("chunk is not HistogramChunk")
+				return errors.New("chunk is not HistogramChunk")
 			}
 			it := hchk.Iterator(nil)
 			bucketCount := 0

@@ -733,7 +736,7 @@ func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt i
 		for _, mset := range matcherSets {
 			sets = append(sets, q.Select(ctx, true, nil, mset...))
 		}
-		ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
+		ss = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge)
 	} else {
 		ss = q.Select(ctx, false, nil, matcherSets[0]...)
 	}
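Note: the renamed `getFormattedBytes` keeps relying on `units.Base2Bytes` from `github.com/alecthomas/units` for the human-readable path. A small hedged sketch of what that formatting looks like; the exact output strings depend on the library version:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// Base2Bytes prints with binary (KiB/MiB/GiB) suffixes.
	fmt.Println(units.Base2Bytes(2048).String())   // e.g. "2KiB"
	fmt.Println(units.Base2Bytes(3 * 1024 * 1024)) // e.g. "3MiB"
}
```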
@@ -32,6 +32,7 @@ import (
 )

 func TestGenerateBucket(t *testing.T) {
+	t.Parallel()
 	tcs := []struct {
 		min, max         int
 		start, end, step int

@@ -55,7 +56,7 @@ func TestGenerateBucket(t *testing.T) {
 }

 // getDumpedSamples dumps samples and returns them.
-func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string {
+func getDumpedSamples(t *testing.T, databasePath, sandboxDirRoot string, mint, maxt int64, match []string, formatter SeriesSetFormatter) string {
 	t.Helper()

 	oldStdout := os.Stdout

@@ -64,8 +65,8 @@ func getDumpedSamples(t *testing.T, path string, mint, maxt int64, match []strin

 	err := dumpSamples(
 		context.Background(),
-		path,
-		t.TempDir(),
+		databasePath,
+		sandboxDirRoot,
 		mint,
 		maxt,
 		match,

@@ -96,11 +97,13 @@ func TestTSDBDump(t *testing.T) {
 		heavy_metric{foo="bar"} 5 4 3 2 1
 		heavy_metric{foo="foo"} 5 4 3 2 1
 	`)
+	t.Cleanup(func() { storage.Close() })

 	tests := []struct {
 		name           string
 		mint           int64
 		maxt           int64
+		sandboxDirRoot string
 		match          []string
 		expectedDump   string
 	}{

@@ -111,6 +114,14 @@ func TestTSDBDump(t *testing.T) {
 			match:        []string{"{__name__=~'(?s:.*)'}"},
 			expectedDump: "testdata/dump-test-1.prom",
 		},
+		{
+			name:           "default match with sandbox dir root set",
+			mint:           math.MinInt64,
+			maxt:           math.MaxInt64,
+			sandboxDirRoot: t.TempDir(),
+			match:          []string{"{__name__=~'(?s:.*)'}"},
+			expectedDump:   "testdata/dump-test-1.prom",
+		},
 		{
 			name: "same matcher twice",
 			mint: math.MinInt64,

@@ -149,7 +160,7 @@ func TestTSDBDump(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.mint, tt.maxt, tt.match, formatSeriesSet)
+			dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, tt.mint, tt.maxt, tt.match, formatSeriesSet)
 			expectedMetrics, err := os.ReadFile(tt.expectedDump)
 			require.NoError(t, err)
 			expectedMetrics = normalizeNewLine(expectedMetrics)

@@ -171,12 +182,29 @@ func TestTSDBDumpOpenMetrics(t *testing.T) {
 		my_counter{foo="bar", baz="abc"} 1 2 3 4 5
 		my_gauge{bar="foo", abc="baz"} 9 8 0 4 7
 	`)
+	t.Cleanup(func() { storage.Close() })

+	tests := []struct {
+		name           string
+		sandboxDirRoot string
+	}{
+		{
+			name: "default match",
+		},
+		{
+			name:           "default match with sandbox dir root set",
+			sandboxDirRoot: t.TempDir(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
 			expectedMetrics, err := os.ReadFile("testdata/dump-openmetrics-test.prom")
 			require.NoError(t, err)
 			expectedMetrics = normalizeNewLine(expectedMetrics)
-			dumpedMetrics := getDumpedSamples(t, storage.Dir(), math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
+			dumpedMetrics := getDumpedSamples(t, storage.Dir(), tt.sandboxDirRoot, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
 			require.Equal(t, sortLines(string(expectedMetrics)), sortLines(dumpedMetrics))
+		})
+	}
 }

@@ -195,7 +223,7 @@ func TestTSDBDumpOpenMetricsRoundTrip(t *testing.T) {
 	})

 	// Dump the blocks into OM format
-	dumpedMetrics := getDumpedSamples(t, dbDir, math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)
+	dumpedMetrics := getDumpedSamples(t, dbDir, "", math.MinInt64, math.MaxInt64, []string{"{__name__=~'(?s:.*)'}"}, formatSeriesSetOpenMetrics)

 	// Should get back the initial metrics.
 	require.Equal(t, string(initialMetrics), dumpedMetrics)
@@ -26,13 +26,13 @@ import (
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/google/go-cmp/cmp"
 	"github.com/grafana/regexp"
 	"github.com/nsf/jsondiff"
 	"gopkg.in/yaml.v2"

 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"

 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"

@@ -46,11 +46,11 @@ import (

 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
-	return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
+func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
+	return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, files...)
 }

-func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug bool, files ...string) int {
 	failed := false
 	junit := &junitxml.JUnitXML{}

@@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
 	}

 	for _, f := range files {
-		if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
+		if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, junit.Suite(f)); errs != nil {
 			fmt.Fprintln(os.Stderr, " FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())

@@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
 	return successExitCode
 }

-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug bool, ts *junitxml.TestSuite) []error {
 	b, err := os.ReadFile(filename)
 	if err != nil {
 		ts.Abort(err)

@@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
 		if t.Interval == 0 {
 			t.Interval = unitTestInp.EvaluationInterval
 		}
-		ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
+		ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, unitTestInp.RuleFiles...)
 		if ers != nil {
 			for _, e := range ers {
 				tc.Fail(e.Error())

@@ -198,7 +198,14 @@ type testGroup struct {
 }

 // test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
+func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug bool, ruleFiles ...string) (outErr []error) {
+	if debug {
+		testStart := time.Now()
+		fmt.Printf("DEBUG: Starting test %s\n", testname)
+		defer func() {
+			fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart))
+		}()
+	}
 	// Setup testing suite.
 	suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
 	if err != nil {

@@ -218,7 +225,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 		Appendable: suite.Storage(),
 		Context:    context.Background(),
 		NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
-		Logger:     log.NewNopLogger(),
+		Logger:     promslog.NewNopLogger(),
 	}
 	m := rules.NewManager(opts)
 	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)

@@ -482,6 +489,32 @@ Outer:
 		}
 	}

+	if debug {
+		ts := tg.maxEvalTime()
+		// Potentially a test can be specified at a time with fractional seconds,
+		// which PromQL cannot represent, so round up to the next whole second.
+		ts = (ts + time.Second).Truncate(time.Second)
+		expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts)
+		q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts))
+		if err != nil {
+			fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err)
+			return errs
+		}
+		res := q.Exec(suite.Context())
+		if res.Err != nil {
+			fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err)
+			return errs
+		}
+		switch v := res.Value.(type) {
+		case promql.Matrix:
+			fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts)
+			fmt.Println(v.String())
+		default:
+			fmt.Printf("DEBUG: Got unexpected type %T\n", v)
+			return errs
+		}
+	}
+
 	if len(errs) > 0 {
 		return errs
 	}
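Note: with the new `debug` argument threaded through `RulesUnitTest`, `RulesUnitTestResult`, `ruleUnitTest`, and `testGroup.test`, callers pass one extra boolean. A sketch of a call site as it would look inside cmd/promtool (package main, so `RulesUnitTest` and `promqltest` come from that package's own scope and imports); the file name is a placeholder:

```go
// Argument order after this change: query options, run filters, diff flag,
// debug flag, then rule files.
exitCode := RulesUnitTest(
	promqltest.LazyLoaderOpts{},
	nil,   // runStrings: run all test groups
	false, // diffFlag
	true,  // debug: print per-test timing and a dump of all loaded series
	"testdata/unittest.yml",
)
os.Exit(exitCode)
```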
@@ -26,6 +26,7 @@ import (
 )

 func TestRulesUnitTest(t *testing.T) {
+	t.Parallel()
 	type args struct {
 		files []string
 	}

@@ -141,14 +142,16 @@ func TestRulesUnitTest(t *testing.T) {
 			reuseCount[tt.want] += len(tt.args.files)
 		}
 		t.Run(tt.name, func(t *testing.T) {
-			if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
+			t.Parallel()
+			if got := RulesUnitTest(tt.queryOpts, nil, false, false, tt.args.files...); got != tt.want {
 				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 	t.Run("Junit xml output ", func(t *testing.T) {
+		t.Parallel()
 		var buf bytes.Buffer
-		if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
+		if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, reuseFiles...); got != 1 {
 			t.Errorf("RulesUnitTestResults() = %v, want 1", got)
 		}
 		var test junitxml.JUnitXML

@@ -185,6 +188,7 @@ func TestRulesUnitTest(t *testing.T) {
 }

 func TestRulesUnitTestRun(t *testing.T) {
+	t.Parallel()
 	type args struct {
 		run   []string
 		files []string

@@ -230,7 +234,8 @@ func TestRulesUnitTestRun(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...)
+			t.Parallel()
+			got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.args.files...)
 			require.Equal(t, tt.want, got)
 		})
 	}
118
config/config.go
@@ -16,6 +16,8 @@ package config
 import (
 	"errors"
 	"fmt"
+	"log/slog"
+	"mime"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -25,12 +27,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/common/sigv4"
+	"github.com/prometheus/sigv4"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -73,7 +73,7 @@ const (
 )
 
 // Load parses the YAML input s into a Config.
-func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func Load(s string, logger *slog.Logger) (*Config, error) {
 	cfg := &Config{}
 	// If the entire config body is empty the UnmarshalYAML method is
 	// never called. We thus have to set the DefaultConfig at the entry
@@ -85,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
 		return nil, err
 	}
 
-	if !expandExternalLabels {
-		return cfg, nil
-	}
-
 	b := labels.NewScratchBuilder(0)
 	cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
 		newV := os.Expand(v.Value, func(s string) string {
@@ -98,26 +94,40 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
 			if v := os.Getenv(s); v != "" {
 				return v
 			}
-			level.Warn(logger).Log("msg", "Empty environment variable", "name", s)
+			logger.Warn("Empty environment variable", "name", s)
 			return ""
 		})
 		if newV != v.Value {
-			level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
+			logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV)
 		}
 		// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
 		b.Add(v.Name, newV)
 	})
+	if !b.Labels().IsEmpty() {
 		cfg.GlobalConfig.ExternalLabels = b.Labels()
+	}
+
+	switch cfg.OTLPConfig.TranslationStrategy {
+	case UnderscoreEscapingWithSuffixes:
+	case "":
+	case NoUTF8EscapingWithSuffixes:
+		if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
+			return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled")
+		}
+	default:
+		return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
+	}
+
 	return cfg, nil
 }
 
 // LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) {
 	content, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
-	cfg, err := Load(string(content), expandExternalLabels, logger)
+	cfg, err := Load(string(content), logger)
 	if err != nil {
 		return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err)
 	}
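Aside, not part of the commit: after this hunk, Load and LoadFile take a *slog.Logger and always expand ${VAR} references in external labels, since the expandExternalLabels flag is gone. A minimal sketch of a caller, using promslog.NewNopLogger() exactly as the tests later in this commit do; the label name and environment variable are invented.

// Sketch of a caller against the new Load signature (uses only what the
// diff itself shows plus standard library behaviour).
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// External-label expansion now always happens inside Load.
	os.Setenv("REGION", "eu-west-1")

	cfg, err := config.Load(`
global:
  external_labels:
    region: ${REGION}
`, promslog.NewNopLogger())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(cfg.GlobalConfig.ExternalLabels) // {region="eu-west-1"}
}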
@@ -166,7 +176,7 @@ var (
 	// DefaultScrapeConfig is the default scrape configuration.
 	DefaultScrapeConfig = ScrapeConfig{
 		// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
-		ScrapeClassicHistograms: false,
+		AlwaysScrapeClassicHistograms: false,
 		MetricsPath: "/metrics",
 		Scheme:      "http",
 		HonorLabels: false,
@@ -183,13 +193,18 @@ var (
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 	}
 
+	DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{
+		FollowRedirects: true,
+		EnableHTTP2:     false,
+	}
+
 	// DefaultRemoteWriteConfig is the default remote write configuration.
 	DefaultRemoteWriteConfig = RemoteWriteConfig{
 		RemoteTimeout:    model.Duration(30 * time.Second),
 		ProtobufMessage:  RemoteWriteProtoMsgV1,
 		QueueConfig:      DefaultQueueConfig,
 		MetadataConfig:   DefaultMetadataConfig,
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
 	}
 
 	// DefaultQueueConfig is the default remote queue configuration.
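Aside, a sketch rather than part of the commit: DefaultRemoteWriteConfig now points at DefaultRemoteWriteHTTPClientConfig, so remote-write clients default to EnableHTTP2: false while other HTTP clients keep the shared default. Assuming the enable_http2 field name from prometheus/common's HTTPClientConfig, an explicit setting still opts a queue back in.

// Sketch: explicit enable_http2 overrides the new remote-write default.
package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	cfg, err := config.Load(`
remote_write:
  - url: http://localhost:9090/api/v1/write
    enable_http2: true
`, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	// Without the enable_http2 line this would print false, per the new default.
	fmt.Println(cfg.RemoteWriteConfigs[0].HTTPClientConfig.EnableHTTP2) // true
}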
@@ -236,7 +251,9 @@ var (
 	}
 
 	// DefaultOTLPConfig is the default OTLP configuration.
-	DefaultOTLPConfig = OTLPConfig{}
+	DefaultOTLPConfig = OTLPConfig{
+		TranslationStrategy: UnderscoreEscapingWithSuffixes,
+	}
 )
 
 // Config is the top-level configuration for Prometheus's config files.
@@ -429,6 +446,8 @@ type GlobalConfig struct {
 	RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"`
 	// File to which PromQL queries are logged.
 	QueryLogFile string `yaml:"query_log_file,omitempty"`
+	// File to which scrape failures are logged.
+	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
 	// The labels to add to any timeseries that this Prometheus instance scrapes.
 	ExternalLabels labels.Labels `yaml:"external_labels,omitempty"`
 	// An uncompressed response body larger than this many bytes will cause the
@@ -474,9 +493,22 @@ func (s ScrapeProtocol) Validate() error {
 	return nil
 }
 
+// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol.
+func (s ScrapeProtocol) HeaderMediaType() string {
+	if _, ok := ScrapeProtocolsHeaders[s]; !ok {
+		return ""
+	}
+	mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s])
+	if err != nil {
+		return ""
+	}
+	return mediaType
+}
+
 var (
 	PrometheusProto      ScrapeProtocol = "PrometheusProto"
 	PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
+	PrometheusText1_0_0  ScrapeProtocol = "PrometheusText1.0.0"
 	OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
 	OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
 	UTF8NamesHeader      string         = model.EscapingKey + "=" + model.AllowUTF8
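Aside, a small sketch of the helper added above: HeaderMediaType parses the Content-Type header registered for a protocol and returns only the media type, and it covers the newly added PrometheusText1.0.0 entry. Only identifiers from this diff are used.

// Sketch: HeaderMediaType strips the version/escaping parameters from the
// registered Content-Type values and returns "" for unknown protocols.
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	fmt.Println(config.PrometheusText1_0_0.HeaderMediaType())     // text/plain
	fmt.Println(config.OpenMetricsText1_0_0.HeaderMediaType())    // application/openmetrics-text
	fmt.Println(config.ScrapeProtocol("bogus").HeaderMediaType()) // (empty string)
}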
@@ -484,6 +516,7 @@ var (
 	ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
 		PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
 		PrometheusText0_0_4:  "text/plain;version=0.0.4",
+		PrometheusText1_0_0:  "text/plain;version=1.0.0;escaping=allow-utf-8",
 		OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
 		OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
 	}
@@ -493,6 +526,7 @@ var (
 	DefaultScrapeProtocols = []ScrapeProtocol{
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
+		PrometheusText1_0_0,
 		PrometheusText0_0_4,
 	}
 
@@ -504,6 +538,7 @@ var (
 		PrometheusProto,
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
+		PrometheusText1_0_0,
 		PrometheusText0_0_4,
 	}
 )
@@ -529,6 +564,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error {
 // SetDirectory joins any relative file paths with dir.
 func (c *GlobalConfig) SetDirectory(dir string) {
 	c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile)
+	c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -591,6 +627,7 @@ func (c *GlobalConfig) isZero() bool {
 		c.EvaluationInterval == 0 &&
 		c.RuleQueryOffset == 0 &&
 		c.QueryLogFile == "" &&
+		c.ScrapeFailureLogFile == "" &&
 		c.ScrapeProtocols == nil
 }
 
@@ -628,10 +665,19 @@ type ScrapeConfig struct {
 	// The protocols to negotiate during a scrape. It tells clients what
 	// protocol are accepted by Prometheus and with what preference (most wanted is first).
 	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
-	// OpenMetricsText1.0.0, PrometheusText0.0.4.
+	// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
 	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
-	// Whether to scrape a classic histogram that is also exposed as a native histogram.
-	ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
+	// The fallback protocol to use if the Content-Type provided by the target
+	// is not provided, blank, or not one of the expected values.
+	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+	// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
+	ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
+	// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
+	AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
+	// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
+	ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
+	// File to which scrape failures are logged.
+	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
 	// The HTTP resource path on which to fetch metrics from targets.
 	MetricsPath string `yaml:"metrics_path,omitempty"`
 	// The URL scheme with which to fetch metrics from targets.
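Aside, a sketch of the new per-job settings introduced in this struct (field names are the yaml tags above; the job name, target and file name are invented). model.NameValidationScheme is set first, mirroring the init() added to config_test.go later in this commit, since the reworked validation switch below expects the UTF-8 scheme unless the config explicitly selects 'legacy'.

// Sketch: loading a scrape config that uses the new fields.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	model.NameValidationScheme = model.UTF8Validation // as in the test init()

	cfg, err := config.Load(`
scrape_configs:
  - job_name: demo
    fallback_scrape_protocol: PrometheusText0.0.4
    always_scrape_classic_histograms: true
    convert_classic_histograms_to_nhcb: true
    scrape_failure_log_file: demo_failures.log
    static_configs:
      - targets: ["localhost:9100"]
`, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	sc := cfg.ScrapeConfigs[0]
	fmt.Println(sc.ScrapeFallbackProtocol)         // PrometheusText0.0.4
	fmt.Println(sc.AlwaysScrapeClassicHistograms)  // true
	fmt.Println(sc.ConvertClassicHistogramsToNHCB) // true
	fmt.Println(sc.ScrapeFailureLogFile)           // demo_failures.log
}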
@@ -684,6 +730,7 @@ type ScrapeConfig struct {
 func (c *ScrapeConfig) SetDirectory(dir string) {
 	c.ServiceDiscoveryConfigs.SetDirectory(dir)
 	c.HTTPClientConfig.SetDirectory(dir)
+	c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile)
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -765,6 +812,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 	if c.KeepDroppedTargets == 0 {
 		c.KeepDroppedTargets = globalConfig.KeepDroppedTargets
 	}
+	if c.ScrapeFailureLogFile == "" {
+		c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile
+	}
 
 	if c.ScrapeProtocols == nil {
 		c.ScrapeProtocols = globalConfig.ScrapeProtocols
@@ -773,11 +823,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 		return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
 	}
 
+	if c.ScrapeFallbackProtocol != "" {
+		if err := c.ScrapeFallbackProtocol.Validate(); err != nil {
+			return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err)
+		}
+	}
+
 	switch globalConfig.MetricNameValidationScheme {
-	case "", LegacyValidationConfig:
-	case UTF8ValidationConfig:
+	case LegacyValidationConfig:
+	case "", UTF8ValidationConfig:
 		if model.NameValidationScheme != model.UTF8Validation {
-			return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names")
+			panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8")
 		}
 	default:
 		return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme)
@@ -948,6 +1004,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
 
 // AlertmanagerAPIVersion represents a version of the
 // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'.
+// 'v1' is no longer supported.
 type AlertmanagerAPIVersion string
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -977,7 +1034,7 @@ const (
 )
 
 var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{
-	AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2,
+	AlertmanagerAPIVersionV2,
 }
 
 // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
@@ -1029,7 +1086,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
 
 	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+		return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
 	}
 
 	// Check for users putting URLs in target groups.
@@ -1138,6 +1195,7 @@ type RemoteWriteConfig struct {
 	Name                 string `yaml:"name,omitempty"`
 	SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
 	SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
+	RoundRobinDNS        bool   `yaml:"round_robin_dns,omitempty"`
 	// ProtobufMessage specifies the protobuf message to use against the remote
 	// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
 	ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
@@ -1359,9 +1417,21 @@ func getGoGCEnv() int {
 	return DefaultRuntimeConfig.GoGC
 }
 
+type translationStrategyOption string
+
+var (
+	// NoUTF8EscapingWithSuffixes will keep UTF-8 characters as they are, units and type suffixes will still be added.
+	NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
+	// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
+	// This option will translate all UTF-8 characters to underscores, while adding units and type suffixes.
+	UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+)
+
 // OTLPConfig is the configuration for writing to the OTLP endpoint.
 type OTLPConfig struct {
 	PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+	TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"`
+	KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"`
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
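Aside, a sketch of the new OTLP knob defined above (assuming the otlp: config section used by the testdata files): Load accepts NoUTF8EscapingWithSuffixes here because the global metric name validation scheme is not 'legacy'; with the legacy scheme the switch added earlier in this commit rejects it.

// Sketch: choosing the non-default OTLP translation strategy and reading it back.
package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	cfg, err := config.Load(`
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes
`, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.OTLPConfig.TranslationStrategy) // NoUTF8EscapingWithSuffixes
}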
@@ -1377,7 +1447,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	for i, attr := range c.PromoteResourceAttributes {
 		attr = strings.TrimSpace(attr)
 		if attr == "" {
-			err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+			err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
 			continue
 		}
 		if _, exists := seen[attr]; exists {
config/config_test.go
@@ -24,10 +24,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -62,6 +62,11 @@ import (
 	"github.com/prometheus/prometheus/util/testutil"
 )
 
+func init() {
+	// This can be removed when the default validation scheme in common is updated.
+	model.NameValidationScheme = model.UTF8Validation
+}
+
 func mustParseURL(u string) *config.URL {
 	parsed, err := url.Parse(u)
 	if err != nil {
@@ -78,6 +83,7 @@ const (
 	globLabelNameLengthLimit  = 200
 	globLabelValueLengthLimit = 200
 	globalGoGC                = 42
+	globScrapeFailureLogFile  = "testdata/fail.log"
 )
 
 var expectedConf = &Config{
@@ -85,7 +91,8 @@ var expectedConf = &Config{
 		ScrapeInterval:     model.Duration(15 * time.Second),
 		ScrapeTimeout:      DefaultGlobalConfig.ScrapeTimeout,
 		EvaluationInterval: model.Duration(30 * time.Second),
-		QueryLogFile:       "",
+		QueryLogFile:         "testdata/query.log",
+		ScrapeFailureLogFile: globScrapeFailureLogFile,
 
 		ExternalLabels: labels.FromStrings("foo", "bar", "monitor", "codelab"),
 
@@ -135,7 +142,7 @@ var expectedConf = &Config{
 					},
 				},
 				FollowRedirects: true,
-				EnableHTTP2:     true,
+				EnableHTTP2:     false,
 			},
 		},
 		{
@@ -151,7 +158,7 @@ var expectedConf = &Config{
 					KeyFile:  filepath.FromSlash("testdata/valid_key_file"),
 				},
 				FollowRedirects: true,
-				EnableHTTP2:     true,
+				EnableHTTP2:     false,
 			},
 			Headers: map[string]string{"name": "value"},
 		},
@@ -161,6 +168,7 @@ var expectedConf = &Config{
 		PromoteResourceAttributes: []string{
 			"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
 		},
+		TranslationStrategy: UnderscoreEscapingWithSuffixes,
 	},
 
 	RemoteReadConfigs: []*RemoteReadConfig{
@@ -211,6 +219,8 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFallbackProtocol: PrometheusText0_0_4,
+			ScrapeFailureLogFile:   "testdata/fail_prom.log",
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -225,6 +235,15 @@ var expectedConf = &Config{
 				TLSConfig: config.TLSConfig{
 					MinVersion: config.TLSVersion(tls.VersionTLS10),
 				},
+				HTTPHeaders: &config.Headers{
+					Headers: map[string]config.Header{
+						"foo": {
+							Values:  []string{"foobar"},
+							Secrets: []config.Secret{"bar", "foo"},
+							Files:   []string{filepath.FromSlash("testdata/valid_password_file")},
+						},
+					},
+				},
 			},
 
 			ServiceDiscoveryConfigs: discovery.Configs{
@@ -314,6 +333,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  210,
 			LabelValueLengthLimit: 210,
 			ScrapeProtocols:       []ScrapeProtocol{PrometheusText0_0_4},
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			HTTPClientConfig: config.HTTPClientConfig{
 				BasicAuth: &config.BasicAuth{
@@ -411,6 +431,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -466,6 +487,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: "/metrics",
 			Scheme:      "http",
@@ -499,6 +521,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -538,6 +561,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -577,6 +601,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -606,6 +631,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -643,6 +669,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -677,6 +704,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -718,6 +746,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -749,6 +778,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -783,6 +813,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -810,6 +841,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -840,6 +872,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: "/federate",
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -870,6 +903,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -900,6 +934,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -927,6 +962,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -962,6 +998,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -996,6 +1033,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1027,6 +1065,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1057,6 +1096,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1091,6 +1131,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1128,6 +1169,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1184,6 +1226,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1211,6 +1254,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 			MetricsPath:      DefaultScrapeConfig.MetricsPath,
@@ -1249,6 +1293,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 			MetricsPath:      DefaultScrapeConfig.MetricsPath,
@@ -1293,6 +1338,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1328,6 +1374,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 			MetricsPath:      DefaultScrapeConfig.MetricsPath,
@@ -1357,6 +1404,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
@@ -1389,6 +1437,7 @@ var expectedConf = &Config{
 			LabelNameLengthLimit:  globLabelNameLengthLimit,
 			LabelValueLengthLimit: globLabelValueLengthLimit,
 			ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
+			ScrapeFailureLogFile:  globScrapeFailureLogFile,
 
 			MetricsPath: DefaultScrapeConfig.MetricsPath,
 			Scheme:      DefaultScrapeConfig.Scheme,
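Aside, a sketch tying the repeated ScrapeFailureLogFile expectations above back to the config surface: the global scrape_failure_log_file acts as a default and ScrapeConfig.Validate copies it into any job that does not set its own file. Job names, targets and file names here are invented.

// Sketch: global default vs. per-job override for scrape_failure_log_file.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	model.NameValidationScheme = model.UTF8Validation // as in the test init()

	cfg, err := config.Load(`
global:
  scrape_failure_log_file: fail.log
scrape_configs:
  - job_name: inherits-global
    static_configs:
      - targets: ["localhost:9090"]
  - job_name: overrides-global
    scrape_failure_log_file: fail_override.log
    static_configs:
      - targets: ["localhost:9091"]
`, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.ScrapeConfigs[0].ScrapeFailureLogFile) // fail.log
	fmt.Println(cfg.ScrapeConfigs[1].ScrapeFailureLogFile) // fail_override.log
}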
|
@ -1453,8 +1502,13 @@ var expectedConf = &Config{
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestYAMLNotLongerSupportedAMApi(t *testing.T) {
|
||||||
|
_, err := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger())
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
func TestYAMLRoundtrip(t *testing.T) {
|
func TestYAMLRoundtrip(t *testing.T) {
|
||||||
want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger())
|
want, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
out, err := yaml.Marshal(want)
|
out, err := yaml.Marshal(want)
|
||||||
|
@ -1467,7 +1521,7 @@ func TestYAMLRoundtrip(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
|
func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
|
||||||
want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger())
|
want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
out, err := yaml.Marshal(want)
|
out, err := yaml.Marshal(want)
|
||||||
|
@ -1482,7 +1536,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
|
||||||
|
|
||||||
func TestOTLPSanitizeResourceAttributes(t *testing.T) {
|
func TestOTLPSanitizeResourceAttributes(t *testing.T) {
|
||||||
t.Run("good config", func(t *testing.T) {
|
t.Run("good config", func(t *testing.T) {
|
||||||
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
|
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
out, err := yaml.Marshal(want)
|
out, err := yaml.Marshal(want)
|
||||||
|
@ -1494,25 +1548,101 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("bad config", func(t *testing.T) {
|
t.Run("bad config", func(t *testing.T) {
|
||||||
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
|
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
|
||||||
require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
|
require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
|
||||||
require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
|
require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) {
|
||||||
|
t.Run("good config", func(t *testing.T) {
|
||||||
|
want, err := LoadFile(filepath.Join("testdata", "otlp_allow_keep_identifying_resource_attributes.good.yml"), false, promslog.NewNopLogger())
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
out, err := yaml.Marshal(want)
|
||||||
|
require.NoError(t, err)
|
||||||
|
var got Config
|
||||||
|
require.NoError(t, yaml.UnmarshalStrict(out, &got))
|
||||||
|
|
||||||
|
require.True(t, got.OTLPConfig.KeepIdentifyingResourceAttributes)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOTLPAllowUTF8(t *testing.T) {
|
||||||
|
t.Run("good config", func(t *testing.T) {
|
||||||
|
fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
|
||||||
|
verify := func(t *testing.T, conf *Config, err error) {
|
||||||
|
t.Helper()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("LoadFile", func(t *testing.T) {
|
||||||
|
conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
|
||||||
|
verify(t, conf, err)
|
||||||
|
})
|
||||||
|
t.Run("Load", func(t *testing.T) {
|
||||||
|
content, err := os.ReadFile(fpath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
conf, err := Load(string(content), promslog.NewNopLogger())
|
||||||
|
verify(t, conf, err)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("incompatible config", func(t *testing.T) {
|
||||||
|
fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
|
||||||
|
verify := func(t *testing.T, err error) {
|
||||||
|
t.Helper()
|
||||||
|
require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("LoadFile", func(t *testing.T) {
|
||||||
|
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
|
||||||
|
verify(t, err)
|
||||||
|
})
|
||||||
|
t.Run("Load", func(t *testing.T) {
|
||||||
|
content, err := os.ReadFile(fpath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = Load(string(content), promslog.NewNopLogger())
|
||||||
|
t.Log("err", err)
|
||||||
|
verify(t, err)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("bad config", func(t *testing.T) {
|
||||||
|
fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml")
|
||||||
|
verify := func(t *testing.T, err error) {
|
||||||
|
t.Helper()
|
||||||
|
require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("LoadFile", func(t *testing.T) {
|
||||||
|
_, err := LoadFile(fpath, false, promslog.NewNopLogger())
|
||||||
|
verify(t, err)
|
||||||
|
})
|
||||||
|
t.Run("Load", func(t *testing.T) {
|
||||||
|
content, err := os.ReadFile(fpath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = Load(string(content), promslog.NewNopLogger())
|
||||||
|
verify(t, err)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestLoadConfig(t *testing.T) {
|
func TestLoadConfig(t *testing.T) {
|
||||||
// Parse a valid file that sets a global scrape timeout. This tests whether parsing
|
// Parse a valid file that sets a global scrape timeout. This tests whether parsing
|
||||||
// an overwritten default field in the global config permanently changes the default.
|
// an overwritten default field in the global config permanently changes the default.
|
||||||
_, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger())
|
_, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
|
c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
|
||||||
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, expectedConf, c)
|
require.Equal(t, expectedConf, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestScrapeIntervalLarger(t *testing.T) {
|
func TestScrapeIntervalLarger(t *testing.T) {
|
||||||
c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
|
c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Len(t, c.ScrapeConfigs, 1)
|
require.Len(t, c.ScrapeConfigs, 1)
|
||||||
for _, sc := range c.ScrapeConfigs {
|
for _, sc := range c.ScrapeConfigs {
|
||||||
|
@ -1522,7 +1652,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
|
||||||
|
|
||||||
// YAML marshaling must not reveal authentication credentials.
|
// YAML marshaling must not reveal authentication credentials.
|
||||||
func TestElideSecrets(t *testing.T) {
|
func TestElideSecrets(t *testing.T) {
|
||||||
c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
|
c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
|
secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
|
||||||
|
@ -1532,38 +1662,38 @@ func TestElideSecrets(t *testing.T) {
|
||||||
yamlConfig := string(config)
|
yamlConfig := string(config)
|
||||||
|
|
||||||
matches := secretRe.FindAllStringIndex(yamlConfig, -1)
|
matches := secretRe.FindAllStringIndex(yamlConfig, -1)
|
||||||
require.Len(t, matches, 22, "wrong number of secret matches found")
|
require.Len(t, matches, 24, "wrong number of secret matches found")
|
||||||
require.NotContains(t, yamlConfig, "mysecret",
|
require.NotContains(t, yamlConfig, "mysecret",
|
||||||
"yaml marshal reveals authentication credentials.")
|
"yaml marshal reveals authentication credentials.")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
|
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
|
||||||
// Parse a valid file that sets a rule files with an absolute path
|
// Parse a valid file that sets a rule files with an absolute path
|
||||||
c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger())
|
c, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, ruleFilesExpectedConf, c)
|
require.Equal(t, ruleFilesExpectedConf, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKubernetesEmptyAPIServer(t *testing.T) {
|
func TestKubernetesEmptyAPIServer(t *testing.T) {
|
||||||
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
|
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKubernetesWithKubeConfig(t *testing.T) {
|
func TestKubernetesWithKubeConfig(t *testing.T) {
|
||||||
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
|
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestKubernetesSelectors(t *testing.T) {
|
func TestKubernetesSelectors(t *testing.T) {
|
||||||
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
|
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
|
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
|
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
|
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
|
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2033,24 +2163,39 @@ var expectedErrors = []struct {
	},
	{
		filename: "scrape_config_files_scrape_protocols.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`,
		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`,
	},
	{
		filename: "scrape_config_files_scrape_protocols2.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
	},
	{
		filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml",
		errMsg:   `parsing YAML file testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`,
	},
	{
		filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml",
		errMsg:   `unmarshal errors`,
	},
}
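The two new expectedErrors entries cover the added fallback_scrape_protocol field: an unknown protocol name is rejected with the supported-protocol list, and a YAML list instead of a single string fails to unmarshal. A hypothetical companion test, not part of this change, sketching both the accepted and the rejected shape with the Load helper used elsewhere in this file:

package config

import (
    "testing"

    "github.com/prometheus/common/promslog"
    "github.com/stretchr/testify/require"
)

// Hypothetical sketch: a known fallback protocol is accepted, an unknown one
// is rejected with the "invalid fallback_scrape_protocol" message listed above.
func TestFallbackScrapeProtocolSketch(t *testing.T) {
    good := `
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: PrometheusText0.0.4
    static_configs:
      - targets: ['localhost:8080']
`
    _, err := Load(good, promslog.NewNopLogger())
    require.NoError(t, err)

    bad := `
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: prometheusproto
    static_configs:
      - targets: ['localhost:8080']
`
    _, err = Load(bad, promslog.NewNopLogger())
    require.ErrorContains(t, err, "invalid fallback_scrape_protocol")
}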
func TestBadConfigs(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	defer func() {
		model.NameValidationScheme = model.UTF8Validation
	}()
	for _, ee := range expectedErrors {
		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
		_, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger())
		require.Error(t, err, "%s", ee.filename)
		require.ErrorContains(t, err, ee.errMsg,
		require.Contains(t, err.Error(), ee.errMsg,
			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
	}
}

func TestBadStaticConfigsJSON(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	defer func() {
		model.NameValidationScheme = model.UTF8Validation
	}()
	content, err := os.ReadFile("testdata/static_config.bad.json")
	require.NoError(t, err)
	var tg targetgroup.Group

@ -2059,6 +2204,10 @@ func TestBadStaticConfigsJSON(t *testing.T) {
}

func TestBadStaticConfigsYML(t *testing.T) {
	model.NameValidationScheme = model.LegacyValidation
	defer func() {
		model.NameValidationScheme = model.UTF8Validation
	}()
	content, err := os.ReadFile("testdata/static_config.bad.yml")
	require.NoError(t, err)
	var tg targetgroup.Group

@ -2067,7 +2216,7 @@ func TestBadStaticConfigsYML(t *testing.T) {
}
func TestEmptyConfig(t *testing.T) {
	c, err := Load("", false, log.NewNopLogger())
	c, err := Load("", promslog.NewNopLogger())
	require.NoError(t, err)
	exp := DefaultConfig
	require.Equal(t, exp, *c)

@ -2077,38 +2226,34 @@ func TestExpandExternalLabels(t *testing.T) {
	// Cleanup ant TEST env variable that could exist on the system.
	os.Setenv("TEST", "")

	c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
	c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)

	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
	require.NoError(t, err)
	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)

	os.Setenv("TEST", "TestValue")
	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
	c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)
	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
}
func TestAgentMode(t *testing.T) {
	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger())
	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger())
	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")

	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger())
	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger())
	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")

	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger())
	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger())
	require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")

	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger())
	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger())
	require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")

	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger())
	require.NoError(t, err)
	require.Empty(t, c.RemoteWriteConfigs)

	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
	c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger())
	require.NoError(t, err)
	require.Len(t, c.RemoteWriteConfigs, 1)
	require.Equal(

@ -2119,7 +2264,7 @@ func TestAgentMode(t *testing.T) {
}
func TestEmptyGlobalBlock(t *testing.T) {
	c, err := Load("global:\n", false, log.NewNopLogger())
	c, err := Load("global:\n", promslog.NewNopLogger())
	require.NoError(t, err)
	exp := DefaultConfig
	exp.Runtime = DefaultRuntimeConfig

@ -2274,7 +2419,7 @@ func TestGetScrapeConfigs(t *testing.T) {

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger())
			c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger())
			require.NoError(t, err)

			scfgs, err := c.GetScrapeConfigs()

@ -2292,7 +2437,7 @@ func kubernetesSDHostURL() config.URL {
}

func TestScrapeConfigDisableCompression(t *testing.T) {
	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger())
	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger())
	require.NoError(t, err)

	out, err := yaml.Marshal(want)

@ -2323,23 +2468,23 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
		{
			name:         "global setting implies local settings",
			inputFile:    "scrape_config_global_validation_mode",
			expectScheme: "utf8",
			expectScheme: "legacy",
		},
		{
			name:         "local setting",
			inputFile:    "scrape_config_local_validation_mode",
			expectScheme: "utf8",
			expectScheme: "legacy",
		},
		{
			name:         "local setting overrides global setting",
			inputFile:    "scrape_config_local_global_validation_mode",
			expectScheme: "legacy",
			expectScheme: "utf8",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger())
			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger())
			require.NoError(t, err)

			out, err := yaml.Marshal(want)

@ -2352,3 +2497,54 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
		})
	}
}
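The expectScheme values above encode the precedence rule for metric_name_validation_scheme: a job-level setting overrides the global one, and a job without its own setting inherits the global scheme. A rough sketch of observing this by loading an inline config and marshalling it back, mirroring how the test compares against golden files; the test name and inline YAML are illustrative only:

package config

import (
    "fmt"
    "testing"

    "github.com/prometheus/common/promslog"
    "github.com/stretchr/testify/require"
    "gopkg.in/yaml.v2"
)

// Sketch only: global scheme legacy, job overrides with utf8, matching the
// "local setting overrides global setting" case in the table above.
func TestValidationSchemePrecedenceSketch(t *testing.T) {
    in := `
global:
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
    metric_name_validation_scheme: utf8
`
    c, err := Load(in, promslog.NewNopLogger())
    require.NoError(t, err)

    out, err := yaml.Marshal(c)
    require.NoError(t, err)
    // The marshalled output should carry the per-job utf8 scheme, as the
    // golden files for these cases do.
    fmt.Println(string(out))
}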
func TestScrapeProtocolHeader(t *testing.T) {
	tests := []struct {
		name          string
		proto         ScrapeProtocol
		expectedValue string
	}{
		{
			name:          "blank",
			proto:         ScrapeProtocol(""),
			expectedValue: "",
		},
		{
			name:          "invalid",
			proto:         ScrapeProtocol("invalid"),
			expectedValue: "",
		},
		{
			name:          "prometheus protobuf",
			proto:         PrometheusProto,
			expectedValue: "application/vnd.google.protobuf",
		},
		{
			name:          "prometheus text 0.0.4",
			proto:         PrometheusText0_0_4,
			expectedValue: "text/plain",
		},
		{
			name:          "prometheus text 1.0.0",
			proto:         PrometheusText1_0_0,
			expectedValue: "text/plain",
		},
		{
			name:          "openmetrics 0.0.1",
			proto:         OpenMetricsText0_0_1,
			expectedValue: "application/openmetrics-text",
		},
		{
			name:          "openmetrics 1.0.0",
			proto:         OpenMetricsText1_0_0,
			expectedValue: "application/openmetrics-text",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			mediaType := tc.proto.HeaderMediaType()

			require.Equal(t, tc.expectedValue, mediaType)
		})
	}
}
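HeaderMediaType, exercised above, returns only the bare media type (and an empty string for unknown protocols); assembling the full Accept header with version and quality parameters happens outside this diff. A small illustration of the mapping, assuming nothing beyond the constants and method added here:

package main

import (
    "fmt"

    "github.com/prometheus/prometheus/config"
)

func main() {
    for _, p := range []config.ScrapeProtocol{
        config.PrometheusProto,
        config.PrometheusText0_0_4,
        config.PrometheusText1_0_0,
        config.OpenMetricsText1_0_0,
        config.ScrapeProtocol("invalid"), // unknown: HeaderMediaType returns ""
    } {
        fmt.Printf("%-22s -> %q\n", p, p.HeaderMediaType())
    }
}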
92	config/reload.go	Normal file
@ -0,0 +1,92 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v2"
)

type ExternalFilesConfig struct {
	RuleFiles         []string `yaml:"rule_files"`
	ScrapeConfigFiles []string `yaml:"scrape_config_files"`
}

// GenerateChecksum generates a checksum of the YAML file and the files it references.
func GenerateChecksum(yamlFilePath string) (string, error) {
	hash := sha256.New()

	yamlContent, err := os.ReadFile(yamlFilePath)
	if err != nil {
		return "", fmt.Errorf("error reading YAML file: %w", err)
	}
	_, err = hash.Write(yamlContent)
	if err != nil {
		return "", fmt.Errorf("error writing YAML file to hash: %w", err)
	}

	var config ExternalFilesConfig
	if err := yaml.Unmarshal(yamlContent, &config); err != nil {
		return "", fmt.Errorf("error unmarshalling YAML: %w", err)
	}

	dir := filepath.Dir(yamlFilePath)

	for i, file := range config.RuleFiles {
		config.RuleFiles[i] = filepath.Join(dir, file)
	}
	for i, file := range config.ScrapeConfigFiles {
		config.ScrapeConfigFiles[i] = filepath.Join(dir, file)
	}

	files := map[string][]string{
		"r": config.RuleFiles,         // "r" for rule files
		"s": config.ScrapeConfigFiles, // "s" for scrape config files
	}

	for _, prefix := range []string{"r", "s"} {
		for _, pattern := range files[prefix] {
			matchingFiles, err := filepath.Glob(pattern)
			if err != nil {
				return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err)
			}

			for _, file := range matchingFiles {
				// Write prefix to the hash ("r" or "s") followed by \0, then
				// the file path.
				_, err = hash.Write([]byte(prefix + "\x00" + file + "\x00"))
				if err != nil {
					return "", fmt.Errorf("error writing %q path to hash: %w", file, err)
				}

				// Read and hash the content of the file.
				content, err := os.ReadFile(file)
				if err != nil {
					return "", fmt.Errorf("error reading file %s: %w", file, err)
				}
				_, err = hash.Write(append(content, []byte("\x00")...))
				if err != nil {
					return "", fmt.Errorf("error writing %q content to hash: %w", file, err)
				}
			}
		}
	}

	return hex.EncodeToString(hash.Sum(nil)), nil
}
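GenerateChecksum hashes the main YAML file plus every rule file and scrape-config file it references, so a reload controller can skip reloads when nothing effectively changed. A minimal sketch of that pattern; the polling loop, interval and file path are illustrative, only config.GenerateChecksum comes from this file:

package main

import (
    "log"
    "time"

    "github.com/prometheus/prometheus/config"
)

func main() {
    const cfgPath = "prometheus.yml" // illustrative path

    last, err := config.GenerateChecksum(cfgPath)
    if err != nil {
        log.Fatalf("initial checksum: %v", err)
    }

    for range time.Tick(30 * time.Second) {
        sum, err := config.GenerateChecksum(cfgPath)
        if err != nil {
            log.Printf("checksum failed, forcing a reload check: %v", err)
            continue
        }
        if sum == last {
            continue // nothing changed, skip the reload
        }
        log.Printf("config changed (checksum %s...), triggering reload", sum[:12])
        // ...call the reload handler here...
        last = sum
    }
}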
222	config/reload_test.go	Normal file
@ -0,0 +1,222 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package config

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestGenerateChecksum(t *testing.T) {
	tmpDir := t.TempDir()

	// Define paths for the temporary files.
	yamlFilePath := filepath.Join(tmpDir, "test.yml")
	ruleFilePath := filepath.Join(tmpDir, "rule_file.yml")
	scrapeConfigFilePath := filepath.Join(tmpDir, "scrape_config.yml")

	// Define initial and modified content for the files.
	originalRuleContent := "groups:\n- name: example\n  rules:\n  - alert: ExampleAlert"
	modifiedRuleContent := "groups:\n- name: example\n  rules:\n  - alert: ModifiedAlert"

	originalScrapeConfigContent := "scrape_configs:\n- job_name: example"
	modifiedScrapeConfigContent := "scrape_configs:\n- job_name: modified_example"

	// Define YAML content referencing the rule and scrape config files.
	yamlContent := `
rule_files:
  - rule_file.yml
scrape_config_files:
  - scrape_config.yml
`

	// Write initial content to files.
	require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))
	require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))
	require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

	// Generate the original checksum.
	originalChecksum := calculateChecksum(t, yamlFilePath)

	t.Run("Rule File Change", func(t *testing.T) {
		// Modify the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(modifiedRuleContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Scrape Config Change", func(t *testing.T) {
		// Modify the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(modifiedScrapeConfigContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Rule File Deletion", func(t *testing.T) {
		// Delete the rule file.
		require.NoError(t, os.Remove(ruleFilePath))

		// Checksum should change.
		deletedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, deletedChecksum)

		// Restore the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Scrape Config Deletion", func(t *testing.T) {
		// Delete the scrape config file.
		require.NoError(t, os.Remove(scrapeConfigFilePath))

		// Checksum should change.
		deletedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, deletedChecksum)

		// Restore the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Main File Change", func(t *testing.T) {
		// Modify the main YAML file.
		modifiedYamlContent := `
global:
  scrape_interval: 3s
rule_files:
  - rule_file.yml
scrape_config_files:
  - scrape_config.yml
`
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the main YAML file.
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Rule File Removed from YAML Config", func(t *testing.T) {
		// Modify the YAML content to remove the rule file.
		modifiedYamlContent := `
scrape_config_files:
  - scrape_config.yml
`
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the YAML content.
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Scrape Config Removed from YAML Config", func(t *testing.T) {
		// Modify the YAML content to remove the scrape config file.
		modifiedYamlContent := `
rule_files:
  - rule_file.yml
`
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(modifiedYamlContent), 0o644))

		// Checksum should change.
		modifiedChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, modifiedChecksum)

		// Revert the YAML content.
		require.NoError(t, os.WriteFile(yamlFilePath, []byte(yamlContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Empty Rule File", func(t *testing.T) {
		// Write an empty rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(""), 0o644))

		// Checksum should change.
		emptyChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, emptyChecksum)

		// Restore the rule file.
		require.NoError(t, os.WriteFile(ruleFilePath, []byte(originalRuleContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})

	t.Run("Empty Scrape Config File", func(t *testing.T) {
		// Write an empty scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(""), 0o644))

		// Checksum should change.
		emptyChecksum := calculateChecksum(t, yamlFilePath)
		require.NotEqual(t, originalChecksum, emptyChecksum)

		// Restore the scrape config file.
		require.NoError(t, os.WriteFile(scrapeConfigFilePath, []byte(originalScrapeConfigContent), 0o644))

		// Checksum should return to the original.
		revertedChecksum := calculateChecksum(t, yamlFilePath)
		require.Equal(t, originalChecksum, revertedChecksum)
	})
}

// calculateChecksum generates a checksum for the given YAML file path.
func calculateChecksum(t *testing.T, yamlFilePath string) string {
	checksum, err := GenerateChecksum(yamlFilePath)
	require.NoError(t, err)
	require.NotEmpty(t, checksum)
	return checksum
}
11	config/testdata/conf.good.yml	vendored
@ -8,6 +8,8 @@ global:
  label_limit: 30
  label_name_length_limit: 200
  label_value_length_limit: 200
  query_log_file: query.log
  scrape_failure_log_file: fail.log
  # scrape_timeout is set to the global default (10s).

  external_labels:
@ -72,6 +74,9 @@ scrape_configs:
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    fallback_scrape_protocol: PrometheusText0.0.4

    scrape_failure_log_file: fail_prom.log
    file_sd_configs:
      - files:
          - foo/*.slow.json
@ -87,6 +92,12 @@ scrape_configs:
        my: label
        your: label

    http_headers:
      foo:
        values: ["foobar"]
        secrets: ["bar", "foo"]
        files: ["valid_password_file"]

    relabel_configs:
      - source_labels: [job, __meta_dns_name]
        regex: (.*)some-[regex]
7	config/testdata/config_with_deprecated_am_api_config.yml	vendored	Normal file
@ -0,0 +1,7 @@
alerting:
  alertmanagers:
    - scheme: http
      api_version: v1
      file_sd_configs:
        - files:
            - nonexistent_file.yml
2	config/testdata/jobname_dup.bad.yml	vendored
@ -1,4 +1,6 @@
# Two scrape configs with the same job names are not allowed.
global:
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
  - job_name: service-x
2	config/testdata/lowercase.bad.yml	vendored
@ -1,3 +1,5 @@
global:
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
    relabel_configs:
2	config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml	vendored	Normal file
@ -0,0 +1,2 @@
otlp:
  keep_identifying_resource_attributes: true
4	config/testdata/otlp_allow_utf8.bad.yml	vendored	Normal file
@ -0,0 +1,4 @@
global:
  metric_name_validation_scheme: legacy
otlp:
  translation_strategy: Invalid
2	config/testdata/otlp_allow_utf8.good.yml	vendored	Normal file
@ -0,0 +1,2 @@
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes
4	config/testdata/otlp_allow_utf8.incompatible.yml	vendored	Normal file
@ -0,0 +1,4 @@
global:
  metric_name_validation_scheme: legacy
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes
5	config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml	vendored	Normal file
@ -0,0 +1,5 @@
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: "prometheusproto"
    static_configs:
      - targets: ['localhost:8080']
5	config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml	vendored	Normal file
@ -0,0 +1,5 @@
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: ["OpenMetricsText1.0.0", "PrometheusText0.0.4"]
    static_configs:
      - targets: ['localhost:8080']
@ -1,4 +1,4 @@
global:
  metric_name_validation_scheme: utf8
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
@ -1,5 +1,5 @@
global:
  metric_name_validation_scheme: utf8
  metric_name_validation_scheme: legacy
scrape_configs:
  - job_name: prometheus
    metric_name_validation_scheme: legacy
    metric_name_validation_scheme: utf8
@ -1,3 +1,3 @@
scrape_configs:
  - job_name: prometheus
    metric_name_validation_scheme: utf8
    metric_name_validation_scheme: legacy
@ -1,82 +0,0 @@
|
||||||
{{/* vim: set ft=html: */}}
|
|
||||||
|
|
||||||
{{/* Navbar, should be passed . */}}
|
|
||||||
{{ define "navbar" }}
|
|
||||||
<nav class="navbar fixed-top navbar-expand-sm navbar-dark bg-dark">
|
|
||||||
<div class="container-fluid">
|
|
||||||
<!-- Brand and toggle get grouped for better mobile display -->
|
|
||||||
<div class="navbar-header">
|
|
||||||
<button type="button" class="navbar-toggler" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false" aria-controls="navbar-nav" aria-label="toggle navigation">
|
|
||||||
<span class="navbar-toggler-icon"></span>
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
</button>
|
|
||||||
<a class="navbar-brand" href="{{ pathPrefix }}/">Prometheus</a>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
|
|
||||||
<ul class="nav navbar-nav">
|
|
||||||
<li class="nav-item"><a class="nav-link" href="{{ pathPrefix }}/alerts">Alerts</a></li>
|
|
||||||
<li class="nav-item"><a class="nav-link" href="https://www.pagerduty.com/">PagerDuty</a></li>
|
|
||||||
</ul>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</nav>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{/* LHS menu, should be passed . */}}
|
|
||||||
{{ define "menu" }}
|
|
||||||
<div class="prom_lhs_menu row">
|
|
||||||
<nav class="col-md-2 md-block bg-dark sidebar prom_lhs_menu_nav">
|
|
||||||
<div class="sidebar-sticky">
|
|
||||||
<ul class="nav flex-column">
|
|
||||||
|
|
||||||
{{ template "_menuItem" (args . "index.html.example" "Overview") }}
|
|
||||||
|
|
||||||
{{ if query "up{job='node'}" }}
|
|
||||||
{{ template "_menuItem" (args . "node.html" "Node") }}
|
|
||||||
{{ if match "^node" .Path }}
|
|
||||||
{{ if .Params.instance }}
|
|
||||||
<ul>
|
|
||||||
<li {{ if eq .Path "node-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
|
||||||
<a class="nav-link" href="node-overview.html?instance={{ .Params.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</a>
|
|
||||||
</li>
|
|
||||||
<ul>
|
|
||||||
<li {{ if eq .Path "node-cpu.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
|
||||||
<a class="nav-link" href="node-cpu.html?instance={{ .Params.instance }}">CPU</a>
|
|
||||||
</li>
|
|
||||||
<li {{ if eq .Path "node-disk.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
|
||||||
<a class="nav-link" href="node-disk.html?instance={{ .Params.instance }}">Disk</a>
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
</ul>
|
|
||||||
{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ if query "up{job='prometheus'}" }}
|
|
||||||
{{ template "_menuItem" (args . "prometheus.html" "Prometheus") }}
|
|
||||||
{{ if match "^prometheus" .Path }}
|
|
||||||
{{ if .Params.instance }}
|
|
||||||
<ul>
|
|
||||||
<li {{ if eq .Path "prometheus-overview.html" }}class="prom_lhs_menu_selected nav-item"{{ end }}>
|
|
||||||
<a class="nav-link" href="prometheus-overview.html?instance={{ .Params.instance }}">{{.Params.instance }}</a>
|
|
||||||
</li>
|
|
||||||
</ul>
|
|
||||||
{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
</ul>
|
|
||||||
</div>
|
|
||||||
</nav>
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{/* Helper, pass (args . path name) */}}
|
|
||||||
{{ define "_menuItem" }}
|
|
||||||
<li {{ if eq .arg0.Path .arg1 }} class="prom_lhs_menu_selected nav-item" {{ end }}><a class="nav-link" href="{{ .arg1 }}">{{ .arg2 }}</a></li>
|
|
||||||
{{ end }}
|
|
||||||
|
|
|
@ -1,138 +0,0 @@
|
||||||
{{/* vim: set ft=html: */}}
|
|
||||||
{{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
|
|
||||||
{{ define "prom_console_head" }}
|
|
||||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css">
|
|
||||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
|
|
||||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prom_console.css">
|
|
||||||
<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js"></script>
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js"></script>
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js"></script>
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js"></script>
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
|
|
||||||
|
|
||||||
<script>
|
|
||||||
var PATH_PREFIX = "{{ pathPrefix }}";
|
|
||||||
</script>
|
|
||||||
<script src="{{ pathPrefix }}/classic/static/js/prom_console.js"></script>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{/* Top of all pages. */}}
|
|
||||||
{{ define "head" -}}
|
|
||||||
<!doctype html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
{{ template "prom_console_head" }}
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
{{ template "navbar" . }}
|
|
||||||
|
|
||||||
{{ template "menu" . }}
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ define "__prom_query_drilldown_noop" }}{{ . }}{{ end }}
|
|
||||||
{{ define "humanize" }}{{ humanize . }}{{ end }}
|
|
||||||
{{ define "humanizeNoSmallPrefix" }}{{ if and (lt . 1.0) (gt . -1.0) }}{{ printf "%.3g" . }}{{ else }}{{ humanize . }}{{ end }}{{ end }}
|
|
||||||
{{ define "humanize1024" }}{{ humanize1024 . }}{{ end }}
|
|
||||||
{{ define "humanizeDuration" }}{{ humanizeDuration . }}{{ end }}
|
|
||||||
{{ define "humanizePercentage" }}{{ humanizePercentage . }}{{ end }}
|
|
||||||
{{ define "humanizeTimestamp" }}{{ humanizeTimestamp . }}{{ end }}
|
|
||||||
{{ define "printf.1f" }}{{ printf "%.1f" . }}{{ end }}
|
|
||||||
{{ define "printf.3g" }}{{ printf "%.3g" . }}{{ end }}
|
|
||||||
|
|
||||||
{{/* prom_query_drilldown (args expr suffix? renderTemplate?)
|
|
||||||
Displays the result of the expression, with a link to /graph for it.
|
|
||||||
|
|
||||||
renderTemplate is the name of the template to use to render the value.
|
|
||||||
*/}}
|
|
||||||
{{ define "prom_query_drilldown" }}
|
|
||||||
{{ $expr := .arg0 }}{{ $suffix := (or .arg1 "") }}{{ $renderTemplate := (or .arg2 "__prom_query_drilldown_noop") }}
|
|
||||||
<a class="prom_query_drilldown" href="{{ pathPrefix }}{{ graphLink $expr }}">{{ with query $expr }}{{tmpl $renderTemplate ( . | first | value )}}{{ $suffix }}{{ else }}-{{ end }}</a>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ define "prom_path" }}/consoles/{{ .Path }}?{{ range $param, $value := .Params }}{{ $param }}={{ $value }}&{{ end }}{{ end }}"
|
|
||||||
|
|
||||||
{{ define "prom_right_table_head" }}
|
|
||||||
<div class="prom_console_rhs">
|
|
||||||
<table class="table table-bordered table-hover table-sm">
|
|
||||||
{{ end }}
|
|
||||||
{{ define "prom_right_table_tail" }}
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{/* RHS table head, pass job name. Should be used after prom_right_table_head. */}}
|
|
||||||
{{ define "prom_right_table_job_head" }}
|
|
||||||
<tr>
|
|
||||||
<th>{{ . }}</th>
|
|
||||||
<th>{{ template "prom_query_drilldown" (args (printf "sum(up{job='%s'})" .)) }} / {{ template "prom_query_drilldown" (args (printf "count(up{job='%s'})" .)) }}</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>CPU</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(irate(process_cpu_seconds_total{job='%s'}[5m]))" .) "s/s" "humanizeNoSmallPrefix") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Memory</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "avg by(job)(process_resident_memory_bytes{job='%s'})" .) "B" "humanize1024") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
|
|
||||||
{{ define "prom_content_head" }}
|
|
||||||
<div class="prom_console_content">
|
|
||||||
<div class="container-fluid">
|
|
||||||
{{ template "prom_graph_timecontrol" . }}
|
|
||||||
{{ end }}
|
|
||||||
{{ define "prom_content_tail" }}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ define "prom_graph_timecontrol" }}
|
|
||||||
<div class="prom_graph_timecontrol">
|
|
||||||
<div class="prom_graph_timecontrol_inner">
|
|
||||||
<div class="prom_graph_timecontrol_group ">
|
|
||||||
<button class="btn btn-light pull-left" type="button" id="prom_graph_duration_shrink" title="Shrink the time range.">
|
|
||||||
<i class="glyphicon glyphicon-minus"></i>
|
|
||||||
</button><!-- Comments between elements to remove spaces
|
|
||||||
--><input class="input pull-left align-middle" size="3" title="Time range of graph" type="text" id="prom_graph_duration"><!--
|
|
||||||
--><button class="btn btn-light pull-left" type="button" id="prom_graph_duration_grow" title="Grow the time range.">
|
|
||||||
<i class="glyphicon glyphicon-plus"></i>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
<div class="prom_graph_timecontrol_group ">
|
|
||||||
<button class="btn btn-light pull-left" type="button" id="prom_graph_time_back" title="Rewind the end time.">
|
|
||||||
<i class="glyphicon glyphicon-backward"></i>
|
|
||||||
</button><!--
|
|
||||||
--><input class="input pull-left align-middle" title="End time of graph" placeholder="Until" type="text" id="prom_graph_time_end" size="16" value=""><!--
|
|
||||||
--><button class="btn btn-light pull-left" type="button" id="prom_graph_time_forward" title="Advance the end time.">
|
|
||||||
<i class="glyphicon glyphicon-forward"></i>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
<div class="prom_graph_timecontrol_group ">
|
|
||||||
<div class="btn-group dropup prom_graph_timecontrol_refresh pull-left">
|
|
||||||
<button type="button" class="btn btn-light pull-left" id="prom_graph_refresh_button" title="Refresh.">
|
|
||||||
<i class="glyphicon glyphicon-repeat"></i>
|
|
||||||
<span class="icon-repeat"></span>
|
|
||||||
(<span id="prom_graph_refresh_button_value">Off</span>)
|
|
||||||
</button>
|
|
||||||
<button type="button" class="btn btn-light pull-left dropdown-toggle" data-toggle="dropdown" title="Set autorefresh."aria-haspopup="true" aria-expanded="false">
|
|
||||||
<span class="caret"></span>
|
|
||||||
</button>
|
|
||||||
<ul class="dropdown-menu" id="prom_graph_refresh_intervals" role="menu">
|
|
||||||
</ul>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.TimeControl();
|
|
||||||
</script>
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{/* Bottom of all pages. */}}
|
|
||||||
{{ define "tail" }}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
{{ end }}
|
|
|
@ -1,28 +0,0 @@
|
||||||
{{ template "head" . }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_head" }}
|
|
||||||
{{ template "prom_right_table_tail" }}
|
|
||||||
|
|
||||||
{{ template "prom_content_head" . }}
|
|
||||||
<h1>Overview</h1>
|
|
||||||
<p>These are example consoles for Prometheus.</p>
|
|
||||||
|
|
||||||
<p>These consoles expect exporters to have the following job labels:</p>
|
|
||||||
<table class="table table-sm table-striped table-bordered" style="width: 0%">
|
|
||||||
<tr>
|
|
||||||
<th>Exporter</th>
|
|
||||||
<th>Job label</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Node Exporter</td>
|
|
||||||
<td><code>node</code></td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Prometheus</td>
|
|
||||||
<td><code>prometheus</code></td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
|
|
||||||
{{ template "prom_content_tail" . }}
|
|
||||||
|
|
||||||
{{ template "tail" }}
|
|
|
@ -1,60 +0,0 @@
|
||||||
{{ template "head" . }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_head" }}
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">CPU(s): {{ template "prom_query_drilldown" (args (printf "scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance)) }}</th>
|
|
||||||
</tr>
|
|
||||||
{{ range printf "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='%s'}[5m])) * 100 / scalar(count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'})))" .Params.instance .Params.instance | query | sortByLabel "mode" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.mode | title }} CPU</td>
|
|
||||||
<td>{{ .Value | printf "%.1f" }}%</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
<tr><th colspan="2">Misc</th></tr>
|
|
||||||
<tr>
|
|
||||||
<td>Processes Running</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "node_procs_running{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Processes Blocked</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "node_procs_blocked{job='node',instance='%s'}" .Params.instance) "" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Forks</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_forks_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Context Switches</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_context_switches_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Interrupts</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_intr_total{job='node',instance='%s'}[5m])" .Params.instance) "/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>1m Loadavg</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "node_load1{job='node',instance='%s'}" .Params.instance)) }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
</tr>
|
|
||||||
{{ template "prom_right_table_tail" }}
|
|
||||||
|
|
||||||
{{ template "prom_content_head" . }}
|
|
||||||
<h1>Node CPU - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
|
|
||||||
|
|
||||||
<h3>CPU Usage</h3>
|
|
||||||
<div id="cpuGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#cpuGraph"),
|
|
||||||
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
|
|
||||||
renderer: 'area',
|
|
||||||
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yTitle: 'Cores'
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
{{ template "prom_content_tail" . }}
|
|
||||||
|
|
||||||
{{ template "tail" }}
|
|
|
@ -1,78 +0,0 @@
|
||||||
{{ template "head" . }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_head" }}
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Disks</th>
|
|
||||||
</tr>
|
|
||||||
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
|
|
||||||
<th colspan="2">{{ .Labels.device }}</th>
|
|
||||||
<tr>
|
|
||||||
<td>Utilization</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Throughput</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Avg Read Time</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_reads_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Avg Write Time</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_write_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) / irate(node_disk_writes_completed_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Filesystem Fullness</th>
|
|
||||||
</tr>
|
|
||||||
{{ define "roughlyNearZero" }}
|
|
||||||
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.mountpoint }}</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
<tr>
|
|
||||||
</tr>
|
|
||||||
{{ template "prom_right_table_tail" }}
|
|
||||||
|
|
||||||
{{ template "prom_content_head" . }}
|
|
||||||
<h1>Node Disk - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
|
|
||||||
|
|
||||||
<h3>Disk I/O Utilization</h3>
|
|
||||||
<div id="diskioGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#diskioGraph"),
|
|
||||||
expr: [
|
|
||||||
"irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
|
|
||||||
],
|
|
||||||
min: 0,
|
|
||||||
name: '[[ device ]]',
|
|
||||||
yUnits: "%",
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yTitle: 'Disk I/O Utilization'
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
<h3>Filesystem Usage</h3>
|
|
||||||
<div id="fsGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#fsGraph"),
|
|
||||||
expr: "100 - node_filesystem_avail_bytes{job='node',instance='{{ .Params.instance }}'} / node_filesystem_size_bytes{job='node'} * 100",
|
|
||||||
min: 0,
|
|
||||||
max: 100,
|
|
||||||
name: '[[ mountpoint ]]',
|
|
||||||
yUnits: "%",
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yTitle: 'Filesystem Fullness'
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
{{ template "prom_content_tail" . }}
|
|
||||||
|
|
||||||
{{ template "tail" }}
|
|
|
@ -1,121 +0,0 @@
|
||||||
{{ template "head" . }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_head" }}
|
|
||||||
<tr><th colspan="2">Overview</th></tr>
|
|
||||||
<tr>
|
|
||||||
<td>User CPU</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>System CPU</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(node_cpu_seconds_total{job='node',instance='%s',mode='system'}[5m])) * 100 / count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Memory Total</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemTotal_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Memory Free</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Network</th>
|
|
||||||
</tr>
|
|
||||||
{{ range printf "node_network_receive_bytes_total{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.device }} Received</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_receive_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.device }} Transmitted</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_network_transmit_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Disks</th>
|
|
||||||
</tr>
|
|
||||||
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s',device!~'^(md\\\\d+$|dm-)'}" .Params.instance | query | sortByLabel "device" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.device }} Utilization</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_io_time_seconds_total{job='node',instance='%s',device='%s'}[5m]) * 100" .Labels.instance .Labels.device) "%" "printf.1f") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
{{ range printf "node_disk_io_time_seconds_total{job='node',instance='%s'}" .Params.instance | query | sortByLabel "device" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.device }} Throughput</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(node_disk_read_bytes_total{job='node',instance='%s',device='%s'}[5m]) + irate(node_disk_written_bytes_total{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device .Labels.instance .Labels.device) "B/s" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Filesystem Fullness</th>
|
|
||||||
</tr>
|
|
||||||
{{ define "roughlyNearZero" }}
|
|
||||||
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
|
|
||||||
{{ end }}
|
|
||||||
{{ range printf "node_filesystem_size_bytes{job='node',instance='%s'}" .Params.instance | query | sortByLabel "mountpoint" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.mountpoint }}</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - node_filesystem_avail_bytes{job='node',instance='%s',mountpoint='%s'} / node_filesystem_size_bytes{job='node'} * 100" .Labels.instance .Labels.mountpoint) "%" "roughlyNearZero") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_tail" }}
|
|
||||||
|
|
||||||
{{ template "prom_content_head" . }}
|
|
||||||
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
|
|
||||||
|
|
||||||
<h3>CPU Usage</h3>
|
|
||||||
<div id="cpuGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#cpuGraph"),
|
|
||||||
expr: "sum by (mode)(irate(node_cpu_seconds_total{job='node',instance='{{ .Params.instance }}',mode!='idle',mode!='iowait',mode!='steal'}[5m]))",
|
|
||||||
renderer: 'area',
|
|
||||||
max: {{ with printf "count(count by (cpu)(node_cpu_seconds_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yTitle: 'Cores'
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
|
|
||||||
<h3>Disk I/O Utilization</h3>
|
|
||||||
<div id="diskioGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#diskioGraph"),
|
|
||||||
expr: [
|
|
||||||
"irate(node_disk_io_time_seconds_total{job='node',instance='{{ .Params.instance }}',device!~'^(md\\\\d+$|dm-)'}[5m]) * 100",
|
|
||||||
],
|
|
||||||
min: 0,
|
|
||||||
name: '[[ device ]]',
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yUnits: "%",
|
|
||||||
yTitle: 'Disk I/O Utilization'
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
|
|
||||||
<h3>Memory</h3>
|
|
||||||
<div id="memoryGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#memoryGraph"),
|
|
||||||
renderer: 'area',
|
|
||||||
expr: [
|
|
||||||
"node_memory_Cached_bytes{job='node',instance='{{ .Params.instance }}'}",
|
|
||||||
"node_memory_Buffers_bytes{job='node',instance='{{ .Params.instance }}'}",
|
|
||||||
"node_memory_MemTotal_bytes{job='node',instance='{{ .Params.instance }}'} - node_memory_MemFree_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Buffers_bytes{job='node',instance='{{.Params.instance}}'} - node_memory_Cached_bytes{job='node',instance='{{.Params.instance}}'}",
|
|
||||||
"node_memory_MemFree{job='node',instance='{{ .Params.instance }}'}",
|
|
||||||
],
|
|
||||||
name: ["Cached", "Buffers", "Used", "Free"],
|
|
||||||
min: 0,
|
|
||||||
yUnits: "B",
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
|
|
||||||
yTitle: 'Memory'
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
|
|
||||||
{{ template "prom_content_tail" . }}
|
|
||||||
|
|
||||||
{{ template "tail" }}
|
|
|
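The "roughlyNearZero" define above acts as a custom value formatter for prom_query_drilldown: the drilldown's third argument names how the result is rendered, so a filesystem-fullness value below 0.1 shows as "~0" instead of "0.0". A small standalone Go sketch of a named template used as a numeric formatter (illustrative only, not the console-template library itself):

package main

import (
	"os"
	"text/template"
)

// Mirrors the "roughlyNearZero" define above; the 0.1 threshold is written
// explicitly here for clarity.
const page = `{{ define "roughlyNearZero" }}{{ if gt 0.1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}{{ end }}{{ template "roughlyNearZero" . }}`

func main() {
	t := template.Must(template.New("demo").Parse(page))
	_ = t.Execute(os.Stdout, 0.04) // prints "~0"
	_ = t.Execute(os.Stdout, 63.4) // prints "63.4"
}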
@ -1,35 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th>Node</th>
<th>{{ template "prom_query_drilldown" (args "sum(up{job='node'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='node'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Node</h1>

<table class="table table-condensed table-striped table-bordered" style="width: 0%">
<tr>
<th>Node</th>
<th>Up</th>
<th>CPU<br/>Used</th>
<th>Memory<br/> Available</th>
</tr>
{{ range query "up{job='node'}" | sortByLabel "instance" }}
<tr>
<td><a href="node-overview.html?instance={{ .Labels.instance }}">{{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Labels.instance }}</a></td>
<td{{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "100 * (1 - avg by(instance) (sum without(mode) (irate(node_cpu_seconds_total{job='node',mode=~'idle|iowait|steal',instance='%s'}[5m]))))" .Labels.instance) "%" "printf.1f") }}</td>
<td>{{ template "prom_query_drilldown" (args (printf "node_memory_MemFree_bytes{job='node',instance='%s'} + node_memory_Cached_bytes{job='node',instance='%s'} + node_memory_Buffers_bytes{job='node',instance='%s'}" .Labels.instance .Labels.instance .Labels.instance) "B" "humanize1024") }}</td>
</tr>
{{ else }}
<tr><td colspan=4>No nodes found.</td></tr>
{{ end }}
</table>


{{ template "prom_content_tail" . }}

{{ template "tail" }}
@ -1,96 +0,0 @@
|
||||||
{{ template "head" . }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_head" }}
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Overview</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>CPU</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(process_cpu_seconds_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "s/s" "humanizeNoSmallPrefix") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Memory</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Version</td>
|
|
||||||
<td>{{ with query (printf "prometheus_build_info{job='prometheus',instance='%s'}" .Params.instance) }}{{. | first | label "version"}}{{end}}</td>
|
|
||||||
</tr>
|
|
||||||
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Storage</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Ingested Samples</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Params.instance) "/s" "humanizeNoSmallPrefix") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Head Series</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_head_series{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Blocks Loaded</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_tsdb_blocks_loaded{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">Rules</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Evaluation Duration</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_evaluator_duration_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_evaluator_duration_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Notification Latency</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_notifications_latency_seconds_sum{job='prometheus',instance='%s'}[5m]) / irate(prometheus_notifications_latency_seconds_count{job='prometheus',instance='%s'}[5m])" .Params.instance .Params.instance) "" "humanizeDuration") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<td>Notification Queue</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "prometheus_notifications_queue_length{job='prometheus',instance='%s'}" .Params.instance) "" "humanize") }}</td>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th colspan="2">HTTP Server</th>
|
|
||||||
</tr>
|
|
||||||
{{ range printf "prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s'}" .Params.instance | query | sortByLabel "handler" }}
|
|
||||||
<tr>
|
|
||||||
<td>{{ .Labels.handler }}</td>
|
|
||||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='%s',handler='%s'}[5m])" .Labels.instance .Labels.handler) "/s" "humanizeNoSmallPrefix") }}</td>
|
|
||||||
</tr>
|
|
||||||
{{ end }}
|
|
||||||
|
|
||||||
{{ template "prom_right_table_tail" }}
|
|
||||||
|
|
||||||
{{ template "prom_content_head" . }}
|
|
||||||
<div class="prom_content_div">
|
|
||||||
<h1>Prometheus Overview - {{ .Params.instance }}</h1>
|
|
||||||
|
|
||||||
<h3>Ingested Samples</h3>
|
|
||||||
<div id="samplesGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#samplesGraph"),
|
|
||||||
expr: "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
|
|
||||||
name: 'Ingested Samples',
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yTitle: "Samples",
|
|
||||||
yUnits: "/s",
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
|
|
||||||
<h3>HTTP Server</h3>
|
|
||||||
<div id="serverGraph"></div>
|
|
||||||
<script>
|
|
||||||
new PromConsole.Graph({
|
|
||||||
node: document.querySelector("#serverGraph"),
|
|
||||||
expr: "irate(prometheus_http_request_duration_seconds_count{job='prometheus',instance='{{ .Params.instance }}'}[5m])",
|
|
||||||
name: '[[handler]]',
|
|
||||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
|
||||||
yTitle: "Requests",
|
|
||||||
yUnits: "/s",
|
|
||||||
})
|
|
||||||
</script>
|
|
||||||
</div>
|
|
||||||
{{ template "prom_content_tail" . }}
|
|
||||||
|
|
||||||
{{ template "tail" }}
|
|
|
@ -1,34 +0,0 @@
{{ template "head" . }}

{{ template "prom_right_table_head" }}
<tr>
<th>Prometheus</th>
<th>{{ template "prom_query_drilldown" (args "sum(up{job='prometheus'})") }} / {{ template "prom_query_drilldown" (args "count(up{job='prometheus'})") }}</th>
</tr>
{{ template "prom_right_table_tail" }}

{{ template "prom_content_head" . }}
<h1>Prometheus</h1>

<table class="table table-sm table-striped table-bordered" style="width: 0%">
<tr>
<th>Prometheus</th>
<th>Up</th>
<th>Ingested Samples</th>
<th>Memory</th>
</tr>
{{ range query "up{job='prometheus'}" | sortByLabel "instance" }}
<tr>
<td><a href="prometheus-overview.html?instance={{ .Labels.instance }}">{{ .Labels.instance }}</a></td>
<td {{ if eq (. | value) 1.0 }}>Yes{{ else }} class="alert-danger">No{{ end }}</td>
<td class="text-right">{{ template "prom_query_drilldown" (args (printf "irate(prometheus_tsdb_head_samples_appended_total{job='prometheus',instance='%s'}[5m])" .Labels.instance) "/s" "humanizeNoSmallPrefix") }}</td>
<td class="text-right">{{ template "prom_query_drilldown" (args (printf "process_resident_memory_bytes{job='prometheus',instance='%s'}" .Labels.instance) "B" "humanize1024")}}</td>
</tr>
{{ else }}
<tr><td colspan=4>No devices found.</td></tr>
{{ end }}
</table>

{{ template "prom_content_tail" . }}

{{ template "tail" }}
@@ -233,7 +233,7 @@ type Config interface {
 }
 
 type DiscovererOptions struct {
-	Logger log.Logger
+	Logger *slog.Logger
 
 	// A registerer for the Discoverer's metrics.
 	Registerer prometheus.Registerer
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -29,11 +30,11 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/ec2"
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
"github.com/go-kit/log"
|
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||||
"github.com/go-kit/log/level"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
"github.com/prometheus/prometheus/discovery/refresh"
|
"github.com/prometheus/prometheus/discovery/refresh"
|
||||||
|
@ -146,9 +147,9 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
// the Discoverer interface.
|
// the Discoverer interface.
|
||||||
type EC2Discovery struct {
|
type EC2Discovery struct {
|
||||||
*refresh.Discovery
|
*refresh.Discovery
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
cfg *EC2SDConfig
|
cfg *EC2SDConfig
|
||||||
ec2 *ec2.EC2
|
ec2 ec2iface.EC2API
|
||||||
|
|
||||||
// azToAZID maps this account's availability zones to their underlying AZ
|
// azToAZID maps this account's availability zones to their underlying AZ
|
||||||
// ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so
|
// ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so
|
||||||
|
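The comment above describes the azToAZID cache that refresh consults when labelling targets; a tiny illustrative lookup using the example value from that comment (not the discovery code itself):

package main

import "fmt"

func main() {
	// Example taken from the comment above: availability zone name -> zone ID.
	azToAZID := map[string]string{"eu-west-2a": "euw2-az2"}

	if azID, ok := azToAZID["eu-west-2a"]; ok {
		fmt.Println(azID) // euw2-az2
	}
}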
@ -157,14 +158,14 @@ type EC2Discovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
|
// NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
|
||||||
func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
|
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
|
||||||
m, ok := metrics.(*ec2Metrics)
|
m, ok := metrics.(*ec2Metrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
d := &EC2Discovery{
|
d := &EC2Discovery{
|
||||||
logger: logger,
|
logger: logger,
|
||||||
|
@ -182,7 +183,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.Dis
|
||||||
return d, nil
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
|
func (d *EC2Discovery) ec2Client(context.Context) (ec2iface.EC2API, error) {
|
||||||
if d.ec2 != nil {
|
if d.ec2 != nil {
|
||||||
return d.ec2, nil
|
return d.ec2, nil
|
||||||
}
|
}
|
||||||
|
@ -254,8 +255,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
|
||||||
// Prometheus requires a reload if AWS adds a new AZ to the region.
|
// Prometheus requires a reload if AWS adds a new AZ to the region.
|
||||||
if d.azToAZID == nil {
|
if d.azToAZID == nil {
|
||||||
if err := d.refreshAZIDs(ctx); err != nil {
|
if err := d.refreshAZIDs(ctx); err != nil {
|
||||||
level.Debug(d.logger).Log(
|
d.logger.Debug(
|
||||||
"msg", "Unable to describe availability zones",
|
"Unable to describe availability zones",
|
||||||
"err", err)
|
"err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
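The hunk above is representative of the logging change running through this diff: go-kit/log call sites become log/slog calls, with the old "msg" field becoming the slog message and the remaining key/value pairs kept as attributes. A minimal, self-contained sketch of the before/after pattern (hypothetical logger setup, not code from this change):

package main

import (
	"log/slog"
	"os"
)

func main() {
	// Hypothetical handler setup; the discovery code receives its *slog.Logger
	// from the caller (or a no-op logger when nil).
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	err := os.ErrNotExist // stand-in error for the sketch

	// Before (go-kit/log):
	//   level.Debug(logger).Log("msg", "Unable to describe availability zones", "err", err)
	// After (log/slog), as in the hunk above:
	logger.Debug("Unable to describe availability zones", "err", err)
}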
@ -296,8 +297,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
|
||||||
labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
|
labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
|
||||||
azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
|
azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
|
||||||
if !ok && d.azToAZID != nil {
|
if !ok && d.azToAZID != nil {
|
||||||
level.Debug(d.logger).Log(
|
d.logger.Debug(
|
||||||
"msg", "Availability zone ID not found",
|
"Availability zone ID not found",
|
||||||
"az", *inst.Placement.AvailabilityZone)
|
"az", *inst.Placement.AvailabilityZone)
|
||||||
}
|
}
|
||||||
labels[ec2LabelAZID] = model.LabelValue(azID)
|
labels[ec2LabelAZID] = model.LabelValue(azID)
|
||||||
|
|
434
discovery/aws/ec2_test.go
Normal file
|
@ -0,0 +1,434 @@
|
||||||
|
// Copyright 2024 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package aws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2"
|
||||||
|
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.uber.org/goleak"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Helper functions to get pointers to literals.
|
||||||
|
// NOTE: this is common between a few tests. In the future it might be worth moving this out into a separate package.
|
||||||
|
func strptr(str string) *string {
|
||||||
|
return &str
|
||||||
|
}
|
||||||
|
|
||||||
|
func boolptr(b bool) *bool {
|
||||||
|
return &b
|
||||||
|
}
|
||||||
|
|
||||||
|
func int64ptr(i int64) *int64 {
|
||||||
|
return &i
|
||||||
|
}
|
||||||
|
|
||||||
|
// Struct for test data.
|
||||||
|
type ec2DataStore struct {
|
||||||
|
region string
|
||||||
|
|
||||||
|
azToAZID map[string]string
|
||||||
|
|
||||||
|
ownerID string
|
||||||
|
|
||||||
|
instances []*ec2.Instance
|
||||||
|
}
|
||||||
|
|
||||||
|
// The tests themselves.
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
goleak.VerifyTestMain(m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEC2DiscoveryRefreshAZIDs(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// iterate through the test cases
|
||||||
|
for _, tt := range []struct {
|
||||||
|
name string
|
||||||
|
shouldFail bool
|
||||||
|
ec2Data *ec2DataStore
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Normal",
|
||||||
|
shouldFail: false,
|
||||||
|
ec2Data: &ec2DataStore{
|
||||||
|
azToAZID: map[string]string{
|
||||||
|
"azname-a": "azid-1",
|
||||||
|
"azname-b": "azid-2",
|
||||||
|
"azname-c": "azid-3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "HandleError",
|
||||||
|
shouldFail: true,
|
||||||
|
ec2Data: &ec2DataStore{},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
client := newMockEC2Client(tt.ec2Data)
|
||||||
|
|
||||||
|
d := &EC2Discovery{
|
||||||
|
ec2: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := d.refreshAZIDs(ctx)
|
||||||
|
if tt.shouldFail {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, client.ec2Data.azToAZID, d.azToAZID)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEC2DiscoveryRefresh(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// iterate through the test cases
|
||||||
|
for _, tt := range []struct {
|
||||||
|
name string
|
||||||
|
ec2Data *ec2DataStore
|
||||||
|
expected []*targetgroup.Group
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "NoPrivateIp",
|
||||||
|
ec2Data: &ec2DataStore{
|
||||||
|
region: "region-noprivateip",
|
||||||
|
azToAZID: map[string]string{
|
||||||
|
"azname-a": "azid-1",
|
||||||
|
"azname-b": "azid-2",
|
||||||
|
"azname-c": "azid-3",
|
||||||
|
},
|
||||||
|
instances: []*ec2.Instance{
|
||||||
|
{
|
||||||
|
InstanceId: strptr("instance-id-noprivateip"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: []*targetgroup.Group{
|
||||||
|
{
|
||||||
|
Source: "region-noprivateip",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "NoVpc",
|
||||||
|
ec2Data: &ec2DataStore{
|
||||||
|
region: "region-novpc",
|
||||||
|
azToAZID: map[string]string{
|
||||||
|
"azname-a": "azid-1",
|
||||||
|
"azname-b": "azid-2",
|
||||||
|
"azname-c": "azid-3",
|
||||||
|
},
|
||||||
|
ownerID: "owner-id-novpc",
|
||||||
|
instances: []*ec2.Instance{
|
||||||
|
{
|
||||||
|
// set every possible option and test them here
|
||||||
|
Architecture: strptr("architecture-novpc"),
|
||||||
|
ImageId: strptr("ami-novpc"),
|
||||||
|
InstanceId: strptr("instance-id-novpc"),
|
||||||
|
InstanceLifecycle: strptr("instance-lifecycle-novpc"),
|
||||||
|
InstanceType: strptr("instance-type-novpc"),
|
||||||
|
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")},
|
||||||
|
Platform: strptr("platform-novpc"),
|
||||||
|
PrivateDnsName: strptr("private-dns-novpc"),
|
||||||
|
PrivateIpAddress: strptr("1.2.3.4"),
|
||||||
|
PublicDnsName: strptr("public-dns-novpc"),
|
||||||
|
PublicIpAddress: strptr("42.42.42.2"),
|
||||||
|
State: &ec2.InstanceState{Name: strptr("running")},
|
||||||
|
// test tags once and for all
|
||||||
|
Tags: []*ec2.Tag{
|
||||||
|
{Key: strptr("tag-1-key"), Value: strptr("tag-1-value")},
|
||||||
|
{Key: strptr("tag-2-key"), Value: strptr("tag-2-value")},
|
||||||
|
nil,
|
||||||
|
{Value: strptr("tag-4-value")},
|
||||||
|
{Key: strptr("tag-5-key")},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: []*targetgroup.Group{
|
||||||
|
{
|
||||||
|
Source: "region-novpc",
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{
|
||||||
|
"__address__": model.LabelValue("1.2.3.4:4242"),
|
||||||
|
"__meta_ec2_ami": model.LabelValue("ami-novpc"),
|
||||||
|
"__meta_ec2_architecture": model.LabelValue("architecture-novpc"),
|
||||||
|
"__meta_ec2_availability_zone": model.LabelValue("azname-b"),
|
||||||
|
"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
|
||||||
|
"__meta_ec2_instance_id": model.LabelValue("instance-id-novpc"),
|
||||||
|
"__meta_ec2_instance_lifecycle": model.LabelValue("instance-lifecycle-novpc"),
|
||||||
|
"__meta_ec2_instance_type": model.LabelValue("instance-type-novpc"),
|
||||||
|
"__meta_ec2_instance_state": model.LabelValue("running"),
|
||||||
|
"__meta_ec2_owner_id": model.LabelValue("owner-id-novpc"),
|
||||||
|
"__meta_ec2_platform": model.LabelValue("platform-novpc"),
|
||||||
|
"__meta_ec2_private_dns_name": model.LabelValue("private-dns-novpc"),
|
||||||
|
"__meta_ec2_private_ip": model.LabelValue("1.2.3.4"),
|
||||||
|
"__meta_ec2_public_dns_name": model.LabelValue("public-dns-novpc"),
|
||||||
|
"__meta_ec2_public_ip": model.LabelValue("42.42.42.2"),
|
||||||
|
"__meta_ec2_region": model.LabelValue("region-novpc"),
|
||||||
|
"__meta_ec2_tag_tag_1_key": model.LabelValue("tag-1-value"),
|
||||||
|
"__meta_ec2_tag_tag_2_key": model.LabelValue("tag-2-value"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Ipv4",
|
||||||
|
ec2Data: &ec2DataStore{
|
||||||
|
region: "region-ipv4",
|
||||||
|
azToAZID: map[string]string{
|
||||||
|
"azname-a": "azid-1",
|
||||||
|
"azname-b": "azid-2",
|
||||||
|
"azname-c": "azid-3",
|
||||||
|
},
|
||||||
|
instances: []*ec2.Instance{
|
||||||
|
{
|
||||||
|
// just the minimum needed for the refresh to work
|
||||||
|
ImageId: strptr("ami-ipv4"),
|
||||||
|
InstanceId: strptr("instance-id-ipv4"),
|
||||||
|
InstanceType: strptr("instance-type-ipv4"),
|
||||||
|
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-c")},
|
||||||
|
PrivateIpAddress: strptr("5.6.7.8"),
|
||||||
|
State: &ec2.InstanceState{Name: strptr("running")},
|
||||||
|
SubnetId: strptr("azid-3"),
|
||||||
|
VpcId: strptr("vpc-ipv4"),
|
||||||
|
// network interfaces
|
||||||
|
NetworkInterfaces: []*ec2.InstanceNetworkInterface{
|
||||||
|
// interface without subnet -> should be ignored
|
||||||
|
{
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||||
|
{
|
||||||
|
Ipv6Address: strptr("2001:db8:1::1"),
|
||||||
|
IsPrimaryIpv6: boolptr(true),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// interface with subnet, no IPv6
|
||||||
|
{
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
|
||||||
|
SubnetId: strptr("azid-3"),
|
||||||
|
},
|
||||||
|
// interface with another subnet, no IPv6
|
||||||
|
{
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
|
||||||
|
SubnetId: strptr("azid-1"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: []*targetgroup.Group{
|
||||||
|
{
|
||||||
|
Source: "region-ipv4",
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{
|
||||||
|
"__address__": model.LabelValue("5.6.7.8:4242"),
|
||||||
|
"__meta_ec2_ami": model.LabelValue("ami-ipv4"),
|
||||||
|
"__meta_ec2_availability_zone": model.LabelValue("azname-c"),
|
||||||
|
"__meta_ec2_availability_zone_id": model.LabelValue("azid-3"),
|
||||||
|
"__meta_ec2_instance_id": model.LabelValue("instance-id-ipv4"),
|
||||||
|
"__meta_ec2_instance_state": model.LabelValue("running"),
|
||||||
|
"__meta_ec2_instance_type": model.LabelValue("instance-type-ipv4"),
|
||||||
|
"__meta_ec2_owner_id": model.LabelValue(""),
|
||||||
|
"__meta_ec2_primary_subnet_id": model.LabelValue("azid-3"),
|
||||||
|
"__meta_ec2_private_ip": model.LabelValue("5.6.7.8"),
|
||||||
|
"__meta_ec2_region": model.LabelValue("region-ipv4"),
|
||||||
|
"__meta_ec2_subnet_id": model.LabelValue(",azid-3,azid-1,"),
|
||||||
|
"__meta_ec2_vpc_id": model.LabelValue("vpc-ipv4"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Ipv6",
|
||||||
|
ec2Data: &ec2DataStore{
|
||||||
|
region: "region-ipv6",
|
||||||
|
azToAZID: map[string]string{
|
||||||
|
"azname-a": "azid-1",
|
||||||
|
"azname-b": "azid-2",
|
||||||
|
"azname-c": "azid-3",
|
||||||
|
},
|
||||||
|
instances: []*ec2.Instance{
|
||||||
|
{
|
||||||
|
// just the minimum needed for the refresh to work
|
||||||
|
ImageId: strptr("ami-ipv6"),
|
||||||
|
InstanceId: strptr("instance-id-ipv6"),
|
||||||
|
InstanceType: strptr("instance-type-ipv6"),
|
||||||
|
Placement: &ec2.Placement{AvailabilityZone: strptr("azname-b")},
|
||||||
|
PrivateIpAddress: strptr("9.10.11.12"),
|
||||||
|
State: &ec2.InstanceState{Name: strptr("running")},
|
||||||
|
SubnetId: strptr("azid-2"),
|
||||||
|
VpcId: strptr("vpc-ipv6"),
|
||||||
|
// network interfaces
|
||||||
|
NetworkInterfaces: []*ec2.InstanceNetworkInterface{
|
||||||
|
// interface without primary IPv6, index 2
|
||||||
|
{
|
||||||
|
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||||
|
DeviceIndex: int64ptr(3),
|
||||||
|
},
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||||
|
{
|
||||||
|
Ipv6Address: strptr("2001:db8:2::1:1"),
|
||||||
|
IsPrimaryIpv6: boolptr(false),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
SubnetId: strptr("azid-2"),
|
||||||
|
},
|
||||||
|
// interface with primary IPv6, index 1
|
||||||
|
{
|
||||||
|
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||||
|
DeviceIndex: int64ptr(1),
|
||||||
|
},
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||||
|
{
|
||||||
|
Ipv6Address: strptr("2001:db8:2::2:1"),
|
||||||
|
IsPrimaryIpv6: boolptr(false),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Ipv6Address: strptr("2001:db8:2::2:2"),
|
||||||
|
IsPrimaryIpv6: boolptr(true),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
SubnetId: strptr("azid-2"),
|
||||||
|
},
|
||||||
|
// interface with primary IPv6, index 3
|
||||||
|
{
|
||||||
|
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||||
|
DeviceIndex: int64ptr(3),
|
||||||
|
},
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{
|
||||||
|
{
|
||||||
|
Ipv6Address: strptr("2001:db8:2::3:1"),
|
||||||
|
IsPrimaryIpv6: boolptr(true),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
SubnetId: strptr("azid-1"),
|
||||||
|
},
|
||||||
|
// interface without primary IPv6, index 0
|
||||||
|
{
|
||||||
|
Attachment: &ec2.InstanceNetworkInterfaceAttachment{
|
||||||
|
DeviceIndex: int64ptr(0),
|
||||||
|
},
|
||||||
|
Ipv6Addresses: []*ec2.InstanceIpv6Address{},
|
||||||
|
SubnetId: strptr("azid-3"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: []*targetgroup.Group{
|
||||||
|
{
|
||||||
|
Source: "region-ipv6",
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{
|
||||||
|
"__address__": model.LabelValue("9.10.11.12:4242"),
|
||||||
|
"__meta_ec2_ami": model.LabelValue("ami-ipv6"),
|
||||||
|
"__meta_ec2_availability_zone": model.LabelValue("azname-b"),
|
||||||
|
"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
|
||||||
|
"__meta_ec2_instance_id": model.LabelValue("instance-id-ipv6"),
|
||||||
|
"__meta_ec2_instance_state": model.LabelValue("running"),
|
||||||
|
"__meta_ec2_instance_type": model.LabelValue("instance-type-ipv6"),
|
||||||
|
"__meta_ec2_ipv6_addresses": model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"),
|
||||||
|
"__meta_ec2_owner_id": model.LabelValue(""),
|
||||||
|
"__meta_ec2_primary_ipv6_addresses": model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"),
|
||||||
|
"__meta_ec2_primary_subnet_id": model.LabelValue("azid-2"),
|
||||||
|
"__meta_ec2_private_ip": model.LabelValue("9.10.11.12"),
|
||||||
|
"__meta_ec2_region": model.LabelValue("region-ipv6"),
|
||||||
|
"__meta_ec2_subnet_id": model.LabelValue(",azid-2,azid-1,azid-3,"),
|
||||||
|
"__meta_ec2_vpc_id": model.LabelValue("vpc-ipv6"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
client := newMockEC2Client(tt.ec2Data)
|
||||||
|
|
||||||
|
d := &EC2Discovery{
|
||||||
|
ec2: client,
|
||||||
|
cfg: &EC2SDConfig{
|
||||||
|
Port: 4242,
|
||||||
|
Region: client.ec2Data.region,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
g, err := d.refresh(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, tt.expected, g)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// EC2 client mock.
|
||||||
|
type mockEC2Client struct {
|
||||||
|
ec2iface.EC2API
|
||||||
|
ec2Data ec2DataStore
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client {
|
||||||
|
client := mockEC2Client{
|
||||||
|
ec2Data: *ec2Data,
|
||||||
|
}
|
||||||
|
return &client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
|
||||||
|
if len(m.ec2Data.azToAZID) == 0 {
|
||||||
|
return nil, errors.New("No AZs found")
|
||||||
|
}
|
||||||
|
|
||||||
|
azs := make([]*ec2.AvailabilityZone, len(m.ec2Data.azToAZID))
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for k, v := range m.ec2Data.azToAZID {
|
||||||
|
azs[i] = &ec2.AvailabilityZone{
|
||||||
|
ZoneName: strptr(k),
|
||||||
|
ZoneId: strptr(v),
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ec2.DescribeAvailabilityZonesOutput{
|
||||||
|
AvailabilityZones: azs,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error {
|
||||||
|
r := ec2.Reservation{}
|
||||||
|
r.SetInstances(m.ec2Data.instances)
|
||||||
|
r.SetOwnerId(m.ec2Data.ownerID)
|
||||||
|
|
||||||
|
o := ec2.DescribeInstancesOutput{}
|
||||||
|
o.SetReservations([]*ec2.Reservation{&r})
|
||||||
|
|
||||||
|
_ = fn(&o, true)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
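mockEC2Client above satisfies the ec2iface.EC2API interface by embedding it and implementing only the two calls the discovery code exercises (DescribeAvailabilityZonesWithContext and DescribeInstancesPagesWithContext). A minimal sketch of that embedding pattern, with hypothetical names rather than the AWS or Prometheus types:

package main

import "fmt"

// Wide stands in for a large interface such as ec2iface.EC2API.
type Wide interface {
	Describe() string
	Terminate() error
}

// mock embeds the interface value; methods that are not overridden still
// exist, but calling them panics because the embedded value is nil.
type mock struct {
	Wide
}

func (m *mock) Describe() string { return "mocked" }

func main() {
	var w Wide = &mock{}
	fmt.Println(w.Describe()) // "mocked"; w.Terminate() would panic if called
}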
|
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -29,10 +30,10 @@ import (
|
||||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||||
"github.com/aws/aws-sdk-go/aws/session"
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
"github.com/aws/aws-sdk-go/service/lightsail"
|
"github.com/aws/aws-sdk-go/service/lightsail"
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
"github.com/prometheus/prometheus/discovery/refresh"
|
"github.com/prometheus/prometheus/discovery/refresh"
|
||||||
|
@ -130,14 +131,14 @@ type LightsailDiscovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
|
// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
|
||||||
func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
|
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
|
||||||
m, ok := metrics.(*lightsailMetrics)
|
m, ok := metrics.(*lightsailMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
|
|
||||||
d := &LightsailDiscovery{
|
d := &LightsailDiscovery{
|
||||||
|
|
|
@ -17,6 +17,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -35,10 +36,9 @@ import (
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
|
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
|
||||||
cache "github.com/Code-Hex/go-generics-cache"
|
cache "github.com/Code-Hex/go-generics-cache"
|
||||||
"github.com/Code-Hex/go-generics-cache/policy/lru"
|
"github.com/Code-Hex/go-generics-cache/policy/lru"
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/go-kit/log/level"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
config_util "github.com/prometheus/common/config"
|
config_util "github.com/prometheus/common/config"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
"github.com/prometheus/common/version"
|
"github.com/prometheus/common/version"
|
||||||
|
@ -175,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
|
||||||
type Discovery struct {
|
type Discovery struct {
|
||||||
*refresh.Discovery
|
*refresh.Discovery
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
cfg *SDConfig
|
cfg *SDConfig
|
||||||
port int
|
port int
|
||||||
cache *cache.Cache[string, *armnetwork.Interface]
|
cache *cache.Cache[string, *armnetwork.Interface]
|
||||||
|
@ -183,14 +183,14 @@ type Discovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
|
// NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
|
||||||
func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
m, ok := metrics.(*azureMetrics)
|
m, ok := metrics.(*azureMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
|
l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
|
||||||
d := &Discovery{
|
d := &Discovery{
|
||||||
|
@ -228,26 +228,26 @@ type azureClient struct {
|
||||||
vm *armcompute.VirtualMachinesClient
|
vm *armcompute.VirtualMachinesClient
|
||||||
vmss *armcompute.VirtualMachineScaleSetsClient
|
vmss *armcompute.VirtualMachineScaleSetsClient
|
||||||
vmssvm *armcompute.VirtualMachineScaleSetVMsClient
|
vmssvm *armcompute.VirtualMachineScaleSetVMsClient
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ client = &azureClient{}
|
var _ client = &azureClient{}
|
||||||
|
|
||||||
// createAzureClient is a helper function for creating an Azure compute client to ARM.
|
// createAzureClient is a helper method for creating an Azure compute client to ARM.
|
||||||
func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
|
func (d *Discovery) createAzureClient() (client, error) {
|
||||||
cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
|
cloudConfiguration, err := CloudConfigurationFromName(d.cfg.Environment)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var c azureClient
|
var c azureClient
|
||||||
c.logger = logger
|
c.logger = d.logger
|
||||||
|
|
||||||
telemetry := policy.TelemetryOptions{
|
telemetry := policy.TelemetryOptions{
|
||||||
ApplicationID: userAgent,
|
ApplicationID: userAgent,
|
||||||
}
|
}
|
||||||
|
|
||||||
credential, err := newCredential(cfg, policy.ClientOptions{
|
credential, err := newCredential(*d.cfg, policy.ClientOptions{
|
||||||
Cloud: cloudConfiguration,
|
Cloud: cloudConfiguration,
|
||||||
Telemetry: telemetry,
|
Telemetry: telemetry,
|
||||||
})
|
})
|
||||||
|
@ -255,7 +255,7 @@ func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
|
client, err := config_util.NewClientFromConfig(d.cfg.HTTPClientConfig, "azure_sd")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
@ -267,22 +267,22 @@ func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
|
c.vm, err = armcompute.NewVirtualMachinesClient(d.cfg.SubscriptionID, credential, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
|
c.nic, err = armnetwork.NewInterfacesClient(d.cfg.SubscriptionID, credential, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
|
c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(d.cfg.SubscriptionID, credential, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
|
c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(d.cfg.SubscriptionID, credential, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &azureClient{}, err
|
return &azureClient{}, err
|
||||||
}
|
}
|
||||||
|
@ -337,35 +337,27 @@ type virtualMachine struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a new azureResource object from an ID string.
|
// Create a new azureResource object from an ID string.
|
||||||
func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) {
|
func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) {
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
resourceID, err := arm.ParseResourceID(id)
|
resourceID, err := arm.ParseResourceID(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
err := fmt.Errorf("invalid ID '%s': %w", id, err)
|
err := fmt.Errorf("invalid ID '%s': %w", id, err)
|
||||||
level.Error(logger).Log("err", err)
|
logger.Error("Failed to parse resource ID", "err", err)
|
||||||
return &arm.ResourceID{}, err
|
return &arm.ResourceID{}, err
|
||||||
}
|
}
|
||||||
return resourceID, nil
|
return resourceID, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
func (d *Discovery) refreshAzureClient(ctx context.Context, client client) ([]*targetgroup.Group, error) {
|
||||||
defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
|
|
||||||
|
|
||||||
client, err := createAzureClient(*d.cfg, d.logger)
|
|
||||||
if err != nil {
|
|
||||||
d.metrics.failuresCount.Inc()
|
|
||||||
return nil, fmt.Errorf("could not create Azure client: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
|
machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.metrics.failuresCount.Inc()
|
d.metrics.failuresCount.Inc()
|
||||||
return nil, fmt.Errorf("could not get virtual machines: %w", err)
|
return nil, fmt.Errorf("could not get virtual machines: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines))
|
d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines))
|
||||||
|
|
||||||
// Load the vms managed by scale sets.
|
// Load the vms managed by scale sets.
|
||||||
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
|
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
|
||||||
|
@ -418,6 +410,18 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||||
return []*targetgroup.Group{&tg}, nil
|
return []*targetgroup.Group{&tg}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||||
|
defer d.logger.Debug("Azure discovery completed")
|
||||||
|
|
||||||
|
client, err := d.createAzureClient()
|
||||||
|
if err != nil {
|
||||||
|
d.metrics.failuresCount.Inc()
|
||||||
|
return nil, fmt.Errorf("could not create Azure client: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return d.refreshAzureClient(ctx, client)
|
||||||
|
}
|
||||||
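Making createAzureClient a method and splitting refresh into a thin client-construction step plus refreshAzureClient keeps all the discovery logic behind the client interface, which is what lets TestAzureRefresh later in this diff hand a fake client straight to refreshAzureClient. A small self-contained sketch of that split, with hypothetical names rather than the actual azure package types:

package main

import (
	"context"
	"fmt"
)

// client is a stand-in for the azure package's client interface.
type client interface {
	getVMs(ctx context.Context) ([]string, error)
}

type discovery struct{}

// refresh only builds the real client and delegates; tests skip it entirely.
func (d *discovery) refresh(ctx context.Context) ([]string, error) {
	c, err := d.newClient()
	if err != nil {
		return nil, err
	}
	return d.refreshWithClient(ctx, c)
}

// refreshWithClient holds the actual logic and accepts any client, real or fake.
func (d *discovery) refreshWithClient(ctx context.Context, c client) ([]string, error) {
	return c.getVMs(ctx)
}

func (d *discovery) newClient() (client, error) { return fakeClient{}, nil }

type fakeClient struct{}

func (fakeClient) getVMs(context.Context) ([]string, error) { return []string{"vm1"}, nil }

func main() {
	d := &discovery{}
	vms, _ := d.refresh(context.Background())
	fmt.Println(vms) // [vm1]
}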
|
|
||||||
func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
|
func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
|
||||||
r, err := newAzureResourceFromID(vm.ID, d.logger)
|
r, err := newAzureResourceFromID(vm.ID, d.logger)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -459,7 +463,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, errorNotFound) {
|
if errors.Is(err, errorNotFound) {
|
||||||
level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
|
d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
|
||||||
} else {
|
} else {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -480,7 +484,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
|
||||||
// yet support this. On deallocated machines, this value happens to be nil so it
|
// yet support this. On deallocated machines, this value happens to be nil so it
|
||||||
// is a cheap and easy way to determine if a machine is allocated or not.
|
// is a cheap and easy way to determine if a machine is allocated or not.
|
||||||
if networkInterface.Properties.Primary == nil {
|
if networkInterface.Properties.Primary == nil {
|
||||||
level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
|
d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name)
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -724,7 +728,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) {
|
||||||
rs := time.Duration(random) * time.Second
|
rs := time.Duration(random) * time.Second
|
||||||
exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
|
exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
|
||||||
d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
|
d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
|
||||||
level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds())
|
d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds())
|
||||||
}
|
}
|
||||||
|
|
||||||
// getFromCache will get the network Interface for the specified nicID
|
// getFromCache will get the network Interface for the specified nicID
|
||||||
|
|
|
@ -15,19 +15,34 @@ package azure
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
|
||||||
|
azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
|
||||||
|
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
|
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
|
||||||
|
fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
|
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
|
||||||
|
fakenetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
|
||||||
cache "github.com/Code-Hex/go-generics-cache"
|
cache "github.com/Code-Hex/go-generics-cache"
|
||||||
"github.com/Code-Hex/go-generics-cache/policy/lru"
|
"github.com/Code-Hex/go-generics-cache/policy/lru"
|
||||||
"github.com/go-kit/log"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"go.uber.org/goleak"
|
"go.uber.org/goleak"
|
||||||
|
|
||||||
|
"github.com/prometheus/prometheus/discovery"
|
||||||
|
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const defaultMockNetworkID string = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
func TestMain(m *testing.M) {
|
||||||
goleak.VerifyTestMain(m,
|
goleak.VerifyTestMain(m,
|
||||||
goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),
|
goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),
|
||||||
|
@ -96,13 +111,12 @@ func TestVMToLabelSet(t *testing.T) {
|
||||||
vmType := "type"
|
vmType := "type"
|
||||||
location := "westeurope"
|
location := "westeurope"
|
||||||
computerName := "computer_name"
|
computerName := "computer_name"
|
||||||
networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1"
|
|
||||||
ipAddress := "10.20.30.40"
|
ipAddress := "10.20.30.40"
|
||||||
primary := true
|
primary := true
|
||||||
networkProfile := armcompute.NetworkProfile{
|
networkProfile := armcompute.NetworkProfile{
|
||||||
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
|
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
|
||||||
{
|
{
|
||||||
ID: &networkID,
|
ID: to.Ptr(defaultMockNetworkID),
|
||||||
Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
|
Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -139,7 +153,7 @@ func TestVMToLabelSet(t *testing.T) {
|
||||||
Location: location,
|
Location: location,
|
||||||
OsType: "Linux",
|
OsType: "Linux",
|
||||||
Tags: map[string]*string{},
|
Tags: map[string]*string{},
|
||||||
NetworkInterfaces: []string{networkID},
|
NetworkInterfaces: []string{defaultMockNetworkID},
|
||||||
Size: size,
|
Size: size,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -150,11 +164,12 @@ func TestVMToLabelSet(t *testing.T) {
|
||||||
cfg := DefaultSDConfig
|
cfg := DefaultSDConfig
|
||||||
d := &Discovery{
|
d := &Discovery{
|
||||||
cfg: &cfg,
|
cfg: &cfg,
|
||||||
logger: log.NewNopLogger(),
|
logger: promslog.NewNopLogger(),
|
||||||
cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
|
cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
|
||||||
}
|
}
|
||||||
network := armnetwork.Interface{
|
network := armnetwork.Interface{
|
||||||
Name: &networkID,
|
Name: to.Ptr(defaultMockNetworkID),
|
||||||
|
ID: to.Ptr(defaultMockNetworkID),
|
||||||
Properties: &armnetwork.InterfacePropertiesFormat{
|
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||||
Primary: &primary,
|
Primary: &primary,
|
||||||
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||||
|
@ -164,9 +179,9 @@ func TestVMToLabelSet(t *testing.T) {
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
client := &mockAzureClient{
|
|
||||||
networkInterface: &network,
|
client := createMockAzureClient(t, nil, nil, nil, network, nil)
|
||||||
}
|
|
||||||
labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
|
labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Len(t, labelSet, 11)
|
require.Len(t, labelSet, 11)
|
||||||
|
@ -475,34 +490,372 @@ func TestNewAzureResourceFromID(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAzureRefresh(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
scenario string
|
||||||
|
vmResp []armcompute.VirtualMachinesClientListAllResponse
|
||||||
|
vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse
|
||||||
|
vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse
|
||||||
|
interfacesResp armnetwork.Interface
|
||||||
|
expectedTG []*targetgroup.Group
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
scenario: "VMs, VMSS and VMSSVMs in Multiple Responses",
|
||||||
|
vmResp: []armcompute.VirtualMachinesClientListAllResponse{
|
||||||
|
{
|
||||||
|
VirtualMachineListResult: armcompute.VirtualMachineListResult{
|
||||||
|
Value: []*armcompute.VirtualMachine{
|
||||||
|
defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1"), to.Ptr("vm1")),
|
||||||
|
defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2"), to.Ptr("vm2")),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
VirtualMachineListResult: armcompute.VirtualMachineListResult{
|
||||||
|
Value: []*armcompute.VirtualMachine{
|
||||||
|
defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3"), to.Ptr("vm3")),
|
||||||
|
defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4"), to.Ptr("vm4")),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
vmssResp: []armcompute.VirtualMachineScaleSetsClientListAllResponse{
|
||||||
|
{
|
||||||
|
VirtualMachineScaleSetListWithLinkResult: armcompute.VirtualMachineScaleSetListWithLinkResult{
|
||||||
|
Value: []*armcompute.VirtualMachineScaleSet{
|
||||||
|
{
|
||||||
|
ID: to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1"),
|
||||||
|
Name: to.Ptr("vmScaleSet1"),
|
||||||
|
Location: to.Ptr("australiaeast"),
|
||||||
|
Type: to.Ptr("Microsoft.Compute/virtualMachineScaleSets"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
vmssvmResp: []armcompute.VirtualMachineScaleSetVMsClientListResponse{
|
||||||
|
{
|
||||||
|
VirtualMachineScaleSetVMListResult: armcompute.VirtualMachineScaleSetVMListResult{
|
||||||
|
Value: []*armcompute.VirtualMachineScaleSetVM{
|
||||||
|
defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1"), to.Ptr("vmScaleSet1_vm1")),
|
||||||
|
defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2"), to.Ptr("vmScaleSet1_vm2")),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
interfacesResp: armnetwork.Interface{
|
||||||
|
ID: to.Ptr(defaultMockNetworkID),
|
||||||
|
Properties: &armnetwork.InterfacePropertiesFormat{
|
||||||
|
Primary: to.Ptr(true),
|
||||||
|
IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
|
||||||
|
{Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
|
||||||
|
PrivateIPAddress: to.Ptr("10.0.0.1"),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expectedTG: []*targetgroup.Group{
|
||||||
|
{
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{
|
||||||
|
"__address__": "10.0.0.1:80",
|
||||||
|
"__meta_azure_machine_computer_name": "computer_name",
|
||||||
|
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1",
|
||||||
|
"__meta_azure_machine_location": "australiaeast",
|
||||||
|
"__meta_azure_machine_name": "vm1",
|
||||||
|
"__meta_azure_machine_os_type": "Linux",
|
||||||
|
"__meta_azure_machine_private_ip": "10.0.0.1",
|
||||||
|
"__meta_azure_machine_resource_group": "{resourceGroup}",
|
||||||
|
"__meta_azure_machine_size": "size",
|
||||||
|
"__meta_azure_machine_tag_prometheus": "",
|
||||||
|
"__meta_azure_subscription_id": "",
|
||||||
|
"__meta_azure_tenant_id": "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "10.0.0.1:80",
|
||||||
|
"__meta_azure_machine_computer_name": "computer_name",
|
||||||
|
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2",
|
||||||
|
"__meta_azure_machine_location": "australiaeast",
|
||||||
|
"__meta_azure_machine_name": "vm2",
|
||||||
|
"__meta_azure_machine_os_type": "Linux",
|
||||||
|
"__meta_azure_machine_private_ip": "10.0.0.1",
|
||||||
|
"__meta_azure_machine_resource_group": "{resourceGroup}",
|
||||||
|
"__meta_azure_machine_size": "size",
|
||||||
|
"__meta_azure_machine_tag_prometheus": "",
|
||||||
|
"__meta_azure_subscription_id": "",
|
||||||
|
"__meta_azure_tenant_id": "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "10.0.0.1:80",
|
||||||
|
"__meta_azure_machine_computer_name": "computer_name",
|
||||||
|
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3",
|
||||||
|
"__meta_azure_machine_location": "australiaeast",
|
||||||
|
"__meta_azure_machine_name": "vm3",
|
||||||
|
"__meta_azure_machine_os_type": "Linux",
|
||||||
|
"__meta_azure_machine_private_ip": "10.0.0.1",
|
||||||
|
"__meta_azure_machine_resource_group": "{resourceGroup}",
|
||||||
|
"__meta_azure_machine_size": "size",
|
||||||
|
"__meta_azure_machine_tag_prometheus": "",
|
||||||
|
"__meta_azure_subscription_id": "",
|
||||||
|
"__meta_azure_tenant_id": "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "10.0.0.1:80",
|
||||||
|
"__meta_azure_machine_computer_name": "computer_name",
|
||||||
|
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4",
|
||||||
|
"__meta_azure_machine_location": "australiaeast",
|
||||||
|
"__meta_azure_machine_name": "vm4",
|
||||||
|
"__meta_azure_machine_os_type": "Linux",
|
||||||
|
"__meta_azure_machine_private_ip": "10.0.0.1",
|
||||||
|
"__meta_azure_machine_resource_group": "{resourceGroup}",
|
||||||
|
"__meta_azure_machine_size": "size",
|
||||||
|
"__meta_azure_machine_tag_prometheus": "",
|
||||||
|
"__meta_azure_subscription_id": "",
|
||||||
|
"__meta_azure_tenant_id": "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "10.0.0.1:80",
|
||||||
|
"__meta_azure_machine_computer_name": "computer_name",
|
||||||
|
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1",
|
||||||
|
"__meta_azure_machine_location": "australiaeast",
|
||||||
|
"__meta_azure_machine_name": "vmScaleSet1_vm1",
|
||||||
|
"__meta_azure_machine_os_type": "Linux",
|
||||||
|
"__meta_azure_machine_private_ip": "10.0.0.1",
|
||||||
|
"__meta_azure_machine_resource_group": "{resourceGroup}",
|
||||||
|
"__meta_azure_machine_scale_set": "vmScaleSet1",
|
||||||
|
"__meta_azure_machine_size": "size",
|
||||||
|
"__meta_azure_machine_tag_prometheus": "",
|
||||||
|
"__meta_azure_subscription_id": "",
|
||||||
|
"__meta_azure_tenant_id": "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "10.0.0.1:80",
|
||||||
|
"__meta_azure_machine_computer_name": "computer_name",
|
||||||
|
"__meta_azure_machine_id": "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2",
|
||||||
|
"__meta_azure_machine_location": "australiaeast",
|
||||||
|
"__meta_azure_machine_name": "vmScaleSet1_vm2",
|
||||||
|
"__meta_azure_machine_os_type": "Linux",
|
||||||
|
"__meta_azure_machine_private_ip": "10.0.0.1",
|
||||||
|
"__meta_azure_machine_resource_group": "{resourceGroup}",
|
||||||
|
"__meta_azure_machine_scale_set": "vmScaleSet1",
|
||||||
|
"__meta_azure_machine_size": "size",
|
||||||
|
"__meta_azure_machine_tag_prometheus": "",
|
||||||
|
"__meta_azure_subscription_id": "",
|
||||||
|
"__meta_azure_tenant_id": "",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range tests {
|
||||||
|
t.Run(tc.scenario, func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
azureSDConfig := &DefaultSDConfig
|
||||||
|
|
||||||
|
azureClient := createMockAzureClient(t, tc.vmResp, tc.vmssResp, tc.vmssvmResp, tc.interfacesResp, nil)
|
||||||
|
|
||||||
|
reg := prometheus.NewRegistry()
|
||||||
|
refreshMetrics := discovery.NewRefreshMetrics(reg)
|
||||||
|
metrics := azureSDConfig.NewDiscovererMetrics(reg, refreshMetrics)
|
||||||
|
|
||||||
|
sd, err := NewDiscovery(azureSDConfig, nil, metrics)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
tg, err := sd.refreshAzureClient(context.Background(), azureClient)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sortTargetsByID(tg[0].Targets)
|
||||||
|
require.Equal(t, tc.expectedTG, tg)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
type mockAzureClient struct {
|
type mockAzureClient struct {
|
||||||
networkInterface *armnetwork.Interface
|
azureClient
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ client = &mockAzureClient{}
|
func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClientListAllResponse, vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse, vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse, interfaceResp armnetwork.Interface, logger *slog.Logger) client {
|
||||||
|
t.Helper()
|
||||||
|
mockVMServer := defaultMockVMServer(vmResp)
|
||||||
|
mockVMSSServer := defaultMockVMSSServer(vmssResp)
|
||||||
|
mockVMScaleSetVMServer := defaultMockVMSSVMServer(vmssvmResp)
|
||||||
|
mockInterfaceServer := defaultMockInterfaceServer(interfaceResp)
|
||||||
|
|
||||||
func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
|
vmClient, err := armcompute.NewVirtualMachinesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
|
||||||
return nil, nil
|
ClientOptions: azcore.ClientOptions{
|
||||||
|
Transport: fake.NewVirtualMachinesServerTransport(&mockVMServer),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
vmssClient, err := armcompute.NewVirtualMachineScaleSetsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
|
||||||
|
ClientOptions: azcore.ClientOptions{
|
||||||
|
Transport: fake.NewVirtualMachineScaleSetsServerTransport(&mockVMSSServer),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
vmssvmClient, err := armcompute.NewVirtualMachineScaleSetVMsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
|
||||||
|
ClientOptions: azcore.ClientOptions{
|
||||||
|
Transport: fake.NewVirtualMachineScaleSetVMsServerTransport(&mockVMScaleSetVMServer),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
interfacesClient, err := armnetwork.NewInterfacesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
|
||||||
|
ClientOptions: azcore.ClientOptions{
|
||||||
|
Transport: fakenetwork.NewInterfacesServerTransport(&mockInterfaceServer),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
return &mockAzureClient{
|
||||||
|
azureClient: azureClient{
|
||||||
|
vm: vmClient,
|
||||||
|
vmss: vmssClient,
|
||||||
|
vmssvm: vmssvmClient,
|
||||||
|
nic: interfacesClient,
|
||||||
|
logger: logger,
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
|
func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer {
|
||||||
return nil, nil
|
return fakenetwork.InterfacesServer{
|
||||||
|
Get: func(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) {
|
||||||
|
resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil)
|
||||||
|
return
|
||||||
|
},
|
||||||
|
GetVirtualMachineScaleSetNetworkInterface: func(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) {
|
||||||
|
resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil)
|
||||||
|
return
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
|
func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer {
|
||||||
return nil, nil
|
return fake.VirtualMachinesServer{
|
||||||
|
NewListAllPager: func(options *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) {
|
||||||
|
for _, page := range vmResp {
|
||||||
|
resp.AddPage(http.StatusOK, page, nil)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
|
func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer {
|
||||||
if networkInterfaceID == "" {
|
return fake.VirtualMachineScaleSetsServer{
|
||||||
return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty")
|
NewListAllPager: func(options *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) {
|
||||||
|
for _, page := range vmssResp {
|
||||||
|
resp.AddPage(http.StatusOK, page, nil)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
}
|
}
|
||||||
return m.networkInterface, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
|
func defaultMockVMSSVMServer(vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer {
|
||||||
if scaleSetName == "" {
|
return fake.VirtualMachineScaleSetVMsServer{
|
||||||
return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty")
|
NewListPager: func(resourceGroupName, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) {
|
||||||
|
for _, page := range vmssvmResp {
|
||||||
|
resp.AddPage(http.StatusOK, page, nil)
|
||||||
}
|
}
|
||||||
return m.networkInterface, nil
|
return
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultVMWithIDAndName(id, name *string) *armcompute.VirtualMachine {
|
||||||
|
vmSize := armcompute.VirtualMachineSizeTypes("size")
|
||||||
|
osType := armcompute.OperatingSystemTypesLinux
|
||||||
|
defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/testVM"
|
||||||
|
defaultName := "testVM"
|
||||||
|
|
||||||
|
if id == nil {
|
||||||
|
id = &defaultID
|
||||||
|
}
|
||||||
|
if name == nil {
|
||||||
|
name = &defaultName
|
||||||
|
}
|
||||||
|
|
||||||
|
return &armcompute.VirtualMachine{
|
||||||
|
ID: id,
|
||||||
|
Name: name,
|
||||||
|
Type: to.Ptr("Microsoft.Compute/virtualMachines"),
|
||||||
|
Location: to.Ptr("australiaeast"),
|
||||||
|
Properties: &armcompute.VirtualMachineProperties{
|
||||||
|
OSProfile: &armcompute.OSProfile{
|
||||||
|
ComputerName: to.Ptr("computer_name"),
|
||||||
|
},
|
||||||
|
StorageProfile: &armcompute.StorageProfile{
|
||||||
|
OSDisk: &armcompute.OSDisk{
|
||||||
|
OSType: &osType,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
NetworkProfile: &armcompute.NetworkProfile{
|
||||||
|
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
|
||||||
|
{
|
||||||
|
ID: to.Ptr(defaultMockNetworkID),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
HardwareProfile: &armcompute.HardwareProfile{
|
||||||
|
VMSize: &vmSize,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Tags: map[string]*string{
|
||||||
|
"prometheus": new(string),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultVMSSVMWithIDAndName(id, name *string) *armcompute.VirtualMachineScaleSetVM {
|
||||||
|
vmSize := armcompute.VirtualMachineSizeTypes("size")
|
||||||
|
osType := armcompute.OperatingSystemTypesLinux
|
||||||
|
defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/testVMScaleSet/virtualMachines/testVM"
|
||||||
|
defaultName := "testVM"
|
||||||
|
|
||||||
|
if id == nil {
|
||||||
|
id = &defaultID
|
||||||
|
}
|
||||||
|
if name == nil {
|
||||||
|
name = &defaultName
|
||||||
|
}
|
||||||
|
|
||||||
|
return &armcompute.VirtualMachineScaleSetVM{
|
||||||
|
ID: id,
|
||||||
|
Name: name,
|
||||||
|
Type: to.Ptr("Microsoft.Compute/virtualMachines"),
|
||||||
|
InstanceID: to.Ptr("123"),
|
||||||
|
Location: to.Ptr("australiaeast"),
|
||||||
|
Properties: &armcompute.VirtualMachineScaleSetVMProperties{
|
||||||
|
OSProfile: &armcompute.OSProfile{
|
||||||
|
ComputerName: to.Ptr("computer_name"),
|
||||||
|
},
|
||||||
|
StorageProfile: &armcompute.StorageProfile{
|
||||||
|
OSDisk: &armcompute.OSDisk{
|
||||||
|
OSType: &osType,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
NetworkProfile: &armcompute.NetworkProfile{
|
||||||
|
NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
|
||||||
|
{ID: to.Ptr(defaultMockNetworkID)},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
HardwareProfile: &armcompute.HardwareProfile{
|
||||||
|
VMSize: &vmSize,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Tags: map[string]*string{
|
||||||
|
"prometheus": new(string),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortTargetsByID(targets []model.LabelSet) {
|
||||||
|
slices.SortFunc(targets, func(a, b model.LabelSet) int {
|
||||||
|
return strings.Compare(string(a["__meta_azure_machine_id"]), string(b["__meta_azure_machine_id"]))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,17 +17,17 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/go-kit/log/level"
|
|
||||||
consul "github.com/hashicorp/consul/api"
|
consul "github.com/hashicorp/consul/api"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||||
|
@ -113,8 +113,11 @@ type SDConfig struct {
|
||||||
Services []string `yaml:"services,omitempty"`
|
Services []string `yaml:"services,omitempty"`
|
||||||
// A list of tags used to filter instances inside a service. Services must contain all tags in the list.
|
// A list of tags used to filter instances inside a service. Services must contain all tags in the list.
|
||||||
ServiceTags []string `yaml:"tags,omitempty"`
|
ServiceTags []string `yaml:"tags,omitempty"`
|
||||||
// Desired node metadata.
|
// Desired node metadata. As of Consul 1.14, consider `filter` instead.
|
||||||
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
|
NodeMeta map[string]string `yaml:"node_meta,omitempty"`
|
||||||
|
// Consul filter string
|
||||||
|
// See https://www.consul.io/api-docs/catalog#filtering-1, for syntax
|
||||||
|
Filter string `yaml:"filter,omitempty"`
|
||||||
|
|
||||||
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
|
||||||
}
|
}
|
||||||
|
@ -174,22 +177,23 @@ type Discovery struct {
|
||||||
watchedServices []string // Set of services which will be discovered.
|
watchedServices []string // Set of services which will be discovered.
|
||||||
watchedTags []string // Tags used to filter instances of a service.
|
watchedTags []string // Tags used to filter instances of a service.
|
||||||
watchedNodeMeta map[string]string
|
watchedNodeMeta map[string]string
|
||||||
|
watchedFilter string
|
||||||
allowStale bool
|
allowStale bool
|
||||||
refreshInterval time.Duration
|
refreshInterval time.Duration
|
||||||
finalizer func()
|
finalizer func()
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
metrics *consulMetrics
|
metrics *consulMetrics
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a new Discovery for the given config.
|
// NewDiscovery returns a new Discovery for the given config.
|
||||||
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
m, ok := metrics.(*consulMetrics)
|
m, ok := metrics.(*consulMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
|
|
||||||
wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
|
wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
|
||||||
|
@ -218,6 +222,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
|
||||||
watchedServices: conf.Services,
|
watchedServices: conf.Services,
|
||||||
watchedTags: conf.ServiceTags,
|
watchedTags: conf.ServiceTags,
|
||||||
watchedNodeMeta: conf.NodeMeta,
|
watchedNodeMeta: conf.NodeMeta,
|
||||||
|
watchedFilter: conf.Filter,
|
||||||
allowStale: conf.AllowStale,
|
allowStale: conf.AllowStale,
|
||||||
refreshInterval: time.Duration(conf.RefreshInterval),
|
refreshInterval: time.Duration(conf.RefreshInterval),
|
||||||
clientDatacenter: conf.Datacenter,
|
clientDatacenter: conf.Datacenter,
|
||||||
|
@ -236,7 +241,7 @@ func (d *Discovery) shouldWatch(name string, tags []string) bool {
|
||||||
return d.shouldWatchFromName(name) && d.shouldWatchFromTags(tags)
|
return d.shouldWatchFromName(name) && d.shouldWatchFromTags(tags)
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldWatch returns whether the service of the given name should be watched based on its name.
|
// shouldWatchFromName returns whether the service of the given name should be watched based on its name.
|
||||||
func (d *Discovery) shouldWatchFromName(name string) bool {
|
func (d *Discovery) shouldWatchFromName(name string) bool {
|
||||||
// If there's no fixed set of watched services, we watch everything.
|
// If there's no fixed set of watched services, we watch everything.
|
||||||
if len(d.watchedServices) == 0 {
|
if len(d.watchedServices) == 0 {
|
||||||
|
@ -251,7 +256,7 @@ func (d *Discovery) shouldWatchFromName(name string) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldWatch returns whether the service of the given name should be watched based on its tags.
|
// shouldWatchFromTags returns whether the service of the given name should be watched based on its tags.
|
||||||
// This gets called when the user doesn't specify a list of services in order to avoid watching
|
// This gets called when the user doesn't specify a list of services in order to avoid watching
|
||||||
// *all* services. Details in https://github.com/prometheus/prometheus/pull/3814
|
// *all* services. Details in https://github.com/prometheus/prometheus/pull/3814
|
||||||
func (d *Discovery) shouldWatchFromTags(tags []string) bool {
|
func (d *Discovery) shouldWatchFromTags(tags []string) bool {
|
||||||
|
@ -282,7 +287,7 @@ func (d *Discovery) getDatacenter() error {
|
||||||
|
|
||||||
info, err := d.client.Agent().Self()
|
info, err := d.client.Agent().Self()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
|
d.logger.Error("Error retrieving datacenter name", "err", err)
|
||||||
d.metrics.rpcFailuresCount.Inc()
|
d.metrics.rpcFailuresCount.Inc()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -290,12 +295,12 @@ func (d *Discovery) getDatacenter() error {
|
||||||
dc, ok := info["Config"]["Datacenter"].(string)
|
dc, ok := info["Config"]["Datacenter"].(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
|
err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
|
||||||
level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
|
d.logger.Error("Error retrieving datacenter name", "err", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.clientDatacenter = dc
|
d.clientDatacenter = dc
|
||||||
d.logger = log.With(d.logger, "datacenter", dc)
|
d.logger = d.logger.With("datacenter", dc)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -361,13 +366,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
||||||
// entire list of services.
|
// entire list of services.
|
||||||
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) {
|
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) {
|
||||||
catalog := d.client.Catalog()
|
catalog := d.client.Catalog()
|
||||||
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
|
d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter)
|
||||||
|
|
||||||
opts := &consul.QueryOptions{
|
opts := &consul.QueryOptions{
|
||||||
WaitIndex: *lastIndex,
|
WaitIndex: *lastIndex,
|
||||||
WaitTime: watchTimeout,
|
WaitTime: watchTimeout,
|
||||||
AllowStale: d.allowStale,
|
AllowStale: d.allowStale,
|
||||||
NodeMeta: d.watchedNodeMeta,
|
NodeMeta: d.watchedNodeMeta,
|
||||||
|
Filter: d.watchedFilter,
|
||||||
}
|
}
|
||||||
t0 := time.Now()
|
t0 := time.Now()
|
||||||
srvs, meta, err := catalog.Services(opts.WithContext(ctx))
|
srvs, meta, err := catalog.Services(opts.WithContext(ctx))
|
||||||
|
@ -382,7 +388,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
|
d.logger.Error("Error refreshing service list", "err", err)
|
||||||
d.metrics.rpcFailuresCount.Inc()
|
d.metrics.rpcFailuresCount.Inc()
|
||||||
time.Sleep(retryInterval)
|
time.Sleep(retryInterval)
|
||||||
return
|
return
|
||||||
|
@ -445,7 +451,7 @@ type consulService struct {
|
||||||
discovery *Discovery
|
discovery *Discovery
|
||||||
client *consul.Client
|
client *consul.Client
|
||||||
tagSeparator string
|
tagSeparator string
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
rpcFailuresCount prometheus.Counter
|
rpcFailuresCount prometheus.Counter
|
||||||
serviceRPCDuration prometheus.Observer
|
serviceRPCDuration prometheus.Observer
|
||||||
}
|
}
|
||||||
|
@ -490,7 +496,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
|
||||||
|
|
||||||
// Get updates for a service.
|
// Get updates for a service.
|
||||||
func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) {
|
func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) {
|
||||||
level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
|
srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
|
||||||
|
|
||||||
opts := &consul.QueryOptions{
|
opts := &consul.QueryOptions{
|
||||||
WaitIndex: *lastIndex,
|
WaitIndex: *lastIndex,
|
||||||
|
@ -513,7 +519,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
|
srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
|
||||||
srv.rpcFailuresCount.Inc()
|
srv.rpcFailuresCount.Inc()
|
||||||
time.Sleep(retryInterval)
|
time.Sleep(retryInterval)
|
||||||
return
|
return
|
||||||
|
|
|
@ -21,10 +21,10 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"go.uber.org/goleak"
|
"go.uber.org/goleak"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
|
||||||
case "/v1/catalog/services?index=1&wait=120000ms":
|
case "/v1/catalog/services?index=1&wait=120000ms":
|
||||||
time.Sleep(5 * time.Second)
|
time.Sleep(5 * time.Second)
|
||||||
response = ServicesTestAnswer
|
response = ServicesTestAnswer
|
||||||
|
case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms":
|
||||||
|
response = ServicesTestAnswer
|
||||||
default:
|
default:
|
||||||
t.Errorf("Unhandled consul call: %s", r.URL)
|
t.Errorf("Unhandled consul call: %s", r.URL)
|
||||||
}
|
}
|
||||||
|
@ -270,7 +272,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
|
func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
|
||||||
logger := log.NewNopLogger()
|
logger := promslog.NewNopLogger()
|
||||||
|
|
||||||
metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
|
metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
|
||||||
|
|
||||||
|
@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) {
|
||||||
<-ch
|
<-ch
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Watch the test service with a specific tag and node-meta via Filter parameter.
|
||||||
|
func TestFilterOption(t *testing.T) {
|
||||||
|
stub, config := newServer(t)
|
||||||
|
defer stub.Close()
|
||||||
|
|
||||||
|
config.Services = []string{"test"}
|
||||||
|
config.Filter = `NodeMeta.rack_name == "2304"`
|
||||||
|
config.Token = "fake-token"
|
||||||
|
|
||||||
|
d := newDiscovery(t, config)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
ch := make(chan []*targetgroup.Group)
|
||||||
|
go func() {
|
||||||
|
d.Run(ctx, ch)
|
||||||
|
close(ch)
|
||||||
|
}()
|
||||||
|
checkOneTarget(t, <-ch)
|
||||||
|
cancel()
|
||||||
|
}
|
||||||
|
|
||||||
func TestGetDatacenterShouldReturnError(t *testing.T) {
|
func TestGetDatacenterShouldReturnError(t *testing.T) {
|
||||||
for _, tc := range []struct {
|
for _, tc := range []struct {
|
||||||
handler func(http.ResponseWriter, *http.Request)
|
handler func(http.ResponseWriter, *http.Request)
|
||||||
|
@ -407,7 +430,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
|
||||||
err = d.getDatacenter()
|
err = d.getDatacenter()
|
||||||
|
|
||||||
// An error should be returned.
|
// An error should be returned.
|
||||||
require.Equal(t, tc.errMessage, err.Error())
|
require.EqualError(t, err, tc.errMessage)
|
||||||
// Should still be empty.
|
// Should still be empty.
|
||||||
require.Equal(t, "", d.clientDatacenter)
|
require.Equal(t, "", d.clientDatacenter)
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,7 +15,9 @@ package digitalocean
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
@ -23,7 +25,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/digitalocean/godo"
|
"github.com/digitalocean/godo"
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
@ -111,10 +112,10 @@ type Discovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
||||||
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
m, ok := metrics.(*digitaloceanMetrics)
|
m, ok := metrics.(*digitaloceanMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
d := &Discovery{
|
d := &Discovery{
|
||||||
|
|
|
@ -19,9 +19,9 @@ import (
|
||||||
"net/url"
|
"net/url"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
|
@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
|
||||||
defer metrics.Unregister()
|
defer metrics.Unregister()
|
||||||
defer refreshMetrics.Unregister()
|
defer refreshMetrics.Unregister()
|
||||||
|
|
||||||
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
|
d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
endpoint, err := url.Parse(sdmock.Mock.Endpoint())
|
endpoint, err := url.Parse(sdmock.Mock.Endpoint())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
|
@ -15,9 +15,9 @@ package discovery
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"log/slog"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
|
|
||||||
|
@ -47,7 +47,7 @@ type DiscovererMetrics interface {
|
||||||
|
|
||||||
// DiscovererOptions provides options for a Discoverer.
|
// DiscovererOptions provides options for a Discoverer.
|
||||||
type DiscovererOptions struct {
|
type DiscovererOptions struct {
|
||||||
Logger log.Logger
|
Logger *slog.Logger
|
||||||
|
|
||||||
Metrics DiscovererMetrics
|
Metrics DiscovererMetrics
|
||||||
|
|
||||||
|
@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) {
|
||||||
|
|
||||||
// UnmarshalYAML implements yaml.Unmarshaler.
|
// UnmarshalYAML implements yaml.Unmarshaler.
|
||||||
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
cfgTyp := getConfigType(configsType)
|
cfgTyp := reflect.StructOf(configFields)
|
||||||
cfgPtr := reflect.New(cfgTyp)
|
cfgPtr := reflect.New(cfgTyp)
|
||||||
cfgVal := cfgPtr.Elem()
|
cfgVal := cfgPtr.Elem()
|
||||||
|
|
||||||
|
@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
|
||||||
// MarshalYAML implements yaml.Marshaler.
|
// MarshalYAML implements yaml.Marshaler.
|
||||||
func (c Configs) MarshalYAML() (interface{}, error) {
|
func (c Configs) MarshalYAML() (interface{}, error) {
|
||||||
cfgTyp := getConfigType(configsType)
|
cfgTyp := reflect.StructOf(configFields)
|
||||||
cfgPtr := reflect.New(cfgTyp)
|
cfgPtr := reflect.New(cfgTyp)
|
||||||
cfgVal := cfgPtr.Elem()
|
cfgVal := cfgPtr.Elem()
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2019 The Prometheus Authors
|
// Copyright 2024 The Prometheus Authors
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
// You may obtain a copy of the License at
|
// You may obtain a copy of the License at
|
||||||
|
@ -11,25 +11,26 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
package testutil
|
package discovery
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
"github.com/stretchr/testify/require"
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
type logger struct {
|
func TestConfigsCustomUnMarshalMarshal(t *testing.T) {
|
||||||
t *testing.T
|
input := `static_configs:
|
||||||
}
|
- targets:
|
||||||
|
- foo:1234
|
||||||
|
- bar:4321
|
||||||
|
`
|
||||||
|
cfg := &Configs{}
|
||||||
|
err := yaml.UnmarshalStrict([]byte(input), cfg)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
// NewLogger returns a gokit compatible Logger which calls t.Log.
|
output, err := yaml.Marshal(cfg)
|
||||||
func NewLogger(t *testing.T) log.Logger {
|
require.NoError(t, err)
|
||||||
return logger{t: t}
|
require.Equal(t, input, string(output))
|
||||||
}
|
|
||||||
|
|
||||||
// Log implements log.Logger.
|
|
||||||
func (t logger) Log(keyvals ...interface{}) error {
|
|
||||||
t.t.Log(keyvals...)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
|
@ -17,17 +17,17 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/go-kit/log/level"
|
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
"github.com/prometheus/prometheus/discovery/refresh"
|
"github.com/prometheus/prometheus/discovery/refresh"
|
||||||
|
@ -111,21 +111,21 @@ type Discovery struct {
|
||||||
names []string
|
names []string
|
||||||
port int
|
port int
|
||||||
qtype uint16
|
qtype uint16
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
metrics *dnsMetrics
|
metrics *dnsMetrics
|
||||||
|
|
||||||
lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
|
lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
||||||
func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
m, ok := metrics.(*dnsMetrics)
|
m, ok := metrics.(*dnsMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
|
|
||||||
qtype := dns.TypeSRV
|
qtype := dns.TypeSRV
|
||||||
|
@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
|
||||||
for _, name := range d.names {
|
for _, name := range d.names {
|
||||||
go func(n string) {
|
go func(n string) {
|
||||||
if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) {
|
if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) {
|
||||||
level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err)
|
d.logger.Error("Error refreshing DNS targets", "err", err)
|
||||||
}
|
}
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}(name)
|
}(name)
|
||||||
|
@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
|
||||||
// CNAME responses can occur with "Type: A" dns_sd_config requests.
|
// CNAME responses can occur with "Type: A" dns_sd_config requests.
|
||||||
continue
|
continue
|
||||||
default:
|
default:
|
||||||
level.Warn(d.logger).Log("msg", "Invalid record", "record", record)
|
d.logger.Warn("Invalid record", "record", record)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
tg.Targets = append(tg.Targets, model.LabelSet{
|
tg.Targets = append(tg.Targets, model.LabelSet{
|
||||||
|
@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
|
||||||
// error will be generic-looking, because trying to return all the errors
|
// error will be generic-looking, because trying to return all the errors
|
||||||
// returned by the combination of all name permutations and servers is a
|
// returned by the combination of all name permutations and servers is a
|
||||||
// nightmare.
|
// nightmare.
|
||||||
func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
conf, err := dns.ClientConfigFromFile(resolvConf)
|
conf, err := dns.ClientConfigFromFile(resolvConf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("could not load resolv.conf: %w", err)
|
return nil, fmt.Errorf("could not load resolv.conf: %w", err)
|
||||||
|
@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
|
||||||
// A non-viable answer is "anything else", which encompasses both various
|
// A non-viable answer is "anything else", which encompasses both various
|
||||||
// system-level problems (like network timeouts) and also
|
// system-level problems (like network timeouts) and also
|
||||||
// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc).
|
// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc).
|
||||||
func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) {
|
func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
client := &dns.Client{}
|
client := &dns.Client{}
|
||||||
|
|
||||||
for _, server := range conf.Servers {
|
for _, server := range conf.Servers {
|
||||||
servAddr := net.JoinHostPort(server, conf.Port)
|
servAddr := net.JoinHostPort(server, conf.Port)
|
||||||
msg, err := askServerForName(name, qtype, client, servAddr, true)
|
msg, err := askServerForName(name, qtype, client, servAddr, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err)
|
logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -15,12 +15,12 @@ package dns
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"errors"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
@ -40,7 +40,7 @@ func TestDNS(t *testing.T) {
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
config SDConfig
|
config SDConfig
|
||||||
lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
|
lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
|
||||||
|
|
||||||
expected []*targetgroup.Group
|
expected []*targetgroup.Group
|
||||||
}{
|
}{
|
||||||
|
@ -52,8 +52,8 @@ func TestDNS(t *testing.T) {
|
||||||
Port: 80,
|
Port: 80,
|
||||||
Type: "A",
|
Type: "A",
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return nil, fmt.Errorf("some error")
|
return nil, errors.New("some error")
|
||||||
},
|
},
|
||||||
expected: []*targetgroup.Group{},
|
expected: []*targetgroup.Group{},
|
||||||
},
|
},
|
||||||
|
@ -65,7 +65,7 @@ func TestDNS(t *testing.T) {
|
||||||
Port: 80,
|
Port: 80,
|
||||||
Type: "A",
|
Type: "A",
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return &dns.Msg{
|
return &dns.Msg{
|
||||||
Answer: []dns.RR{
|
Answer: []dns.RR{
|
||||||
&dns.A{A: net.IPv4(192, 0, 2, 2)},
|
&dns.A{A: net.IPv4(192, 0, 2, 2)},
|
||||||
|
@ -97,7 +97,7 @@ func TestDNS(t *testing.T) {
|
||||||
Port: 80,
|
Port: 80,
|
||||||
Type: "AAAA",
|
Type: "AAAA",
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return &dns.Msg{
|
return &dns.Msg{
|
||||||
Answer: []dns.RR{
|
Answer: []dns.RR{
|
||||||
&dns.AAAA{AAAA: net.IPv6loopback},
|
&dns.AAAA{AAAA: net.IPv6loopback},
|
||||||
|
@ -128,7 +128,7 @@ func TestDNS(t *testing.T) {
|
||||||
Type: "SRV",
|
Type: "SRV",
|
||||||
RefreshInterval: model.Duration(time.Minute),
|
RefreshInterval: model.Duration(time.Minute),
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return &dns.Msg{
|
return &dns.Msg{
|
||||||
Answer: []dns.RR{
|
Answer: []dns.RR{
|
||||||
&dns.SRV{Port: 3306, Target: "db1.example.com."},
|
&dns.SRV{Port: 3306, Target: "db1.example.com."},
|
||||||
|
@ -167,7 +167,7 @@ func TestDNS(t *testing.T) {
|
||||||
Names: []string{"_mysql._tcp.db.example.com."},
|
Names: []string{"_mysql._tcp.db.example.com."},
|
||||||
RefreshInterval: model.Duration(time.Minute),
|
RefreshInterval: model.Duration(time.Minute),
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return &dns.Msg{
|
return &dns.Msg{
|
||||||
Answer: []dns.RR{
|
Answer: []dns.RR{
|
||||||
&dns.SRV{Port: 3306, Target: "db1.example.com."},
|
&dns.SRV{Port: 3306, Target: "db1.example.com."},
|
||||||
|
@ -198,7 +198,7 @@ func TestDNS(t *testing.T) {
|
||||||
Names: []string{"_mysql._tcp.db.example.com."},
|
Names: []string{"_mysql._tcp.db.example.com."},
|
||||||
RefreshInterval: model.Duration(time.Minute),
|
RefreshInterval: model.Duration(time.Minute),
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return &dns.Msg{}, nil
|
return &dns.Msg{}, nil
|
||||||
},
|
},
|
||||||
expected: []*targetgroup.Group{
|
expected: []*targetgroup.Group{
|
||||||
|
@ -215,7 +215,7 @@ func TestDNS(t *testing.T) {
|
||||||
Port: 25,
|
Port: 25,
|
||||||
RefreshInterval: model.Duration(time.Minute),
|
RefreshInterval: model.Duration(time.Minute),
|
||||||
},
|
},
|
||||||
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
|
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||||
return &dns.Msg{
|
return &dns.Msg{
|
||||||
Answer: []dns.RR{
|
Answer: []dns.RR{
|
||||||
&dns.MX{Preference: 0, Mx: "smtp1.example.com."},
|
&dns.MX{Preference: 0, Mx: "smtp1.example.com."},
|
||||||
|
|
|
@ -16,14 +16,13 @@ package eureka
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
@ -126,10 +125,10 @@ type Discovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery creates a new Eureka discovery for the given role.
|
// NewDiscovery creates a new Eureka discovery for the given role.
|
||||||
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
m, ok := metrics.(*eurekaMetrics)
|
m, ok := metrics.(*eurekaMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
|
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
|
||||||
|
|
|
@ -19,6 +19,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -26,12 +27,11 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/fsnotify/fsnotify"
|
"github.com/fsnotify/fsnotify"
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/go-kit/log/level"
|
|
||||||
"github.com/grafana/regexp"
|
"github.com/grafana/regexp"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
|
@ -175,20 +175,20 @@ type Discovery struct {
|
||||||
// and how many target groups they contained.
|
// and how many target groups they contained.
|
||||||
// This is used to detect deleted target groups.
|
// This is used to detect deleted target groups.
|
||||||
lastRefresh map[string]int
|
lastRefresh map[string]int
|
||||||
logger log.Logger
|
logger *slog.Logger
|
||||||
|
|
||||||
metrics *fileMetrics
|
metrics *fileMetrics
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a new file discovery for the given paths.
|
// NewDiscovery returns a new file discovery for the given paths.
|
||||||
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
fm, ok := metrics.(*fileMetrics)
|
fm, ok := metrics.(*fileMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
if logger == nil {
|
if logger == nil {
|
||||||
logger = log.NewNopLogger()
|
logger = promslog.NewNopLogger()
|
||||||
}
|
}
|
||||||
|
|
||||||
disc := &Discovery{
|
disc := &Discovery{
|
||||||
|
@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string {
|
||||||
for _, p := range d.paths {
|
for _, p := range d.paths {
|
||||||
files, err := filepath.Glob(p)
|
files, err := filepath.Glob(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err)
|
d.logger.Error("Error expanding glob", "glob", p, "err", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
paths = append(paths, files...)
|
paths = append(paths, files...)
|
||||||
|
@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() {
|
||||||
p = "./"
|
p = "./"
|
||||||
}
|
}
|
||||||
if err := d.watcher.Add(p); err != nil {
|
if err := d.watcher.Add(p); err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err)
|
d.logger.Error("Error adding file watch", "path", p, "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() {
|
||||||
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
||||||
watcher, err := fsnotify.NewWatcher()
|
watcher, err := fsnotify.NewWatcher()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
|
d.logger.Error("Error adding file watcher", "err", err)
|
||||||
d.metrics.fileWatcherErrorsCount.Inc()
|
d.metrics.fileWatcherErrorsCount.Inc()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
|
||||||
|
|
||||||
case err := <-d.watcher.Errors:
|
case err := <-d.watcher.Errors:
|
||||||
if err != nil {
|
if err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error watching file", "err", err)
|
d.logger.Error("Error watching file", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) {
|
||||||
|
|
||||||
// stop shuts down the file watcher.
|
// stop shuts down the file watcher.
|
||||||
func (d *Discovery) stop() {
|
func (d *Discovery) stop() {
|
||||||
level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
|
d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
|
||||||
|
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
defer close(done)
|
defer close(done)
|
||||||
|
@ -320,10 +320,10 @@ func (d *Discovery) stop() {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
if err := d.watcher.Close(); err != nil {
|
if err := d.watcher.Close(); err != nil {
|
||||||
level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
|
d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
level.Debug(d.logger).Log("msg", "File discovery stopped")
|
d.logger.Debug("File discovery stopped")
|
||||||
}
|
}
|
||||||
|
|
||||||
// refresh reads all files matching the discovery's patterns and sends the respective
|
// refresh reads all files matching the discovery's patterns and sends the respective
|
||||||
|
@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
d.metrics.fileSDReadErrorsCount.Inc()
|
d.metrics.fileSDReadErrorsCount.Inc()
|
||||||
|
|
||||||
level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
|
d.logger.Error("Error reading file", "path", p, "err", err)
|
||||||
// Prevent deletion down below.
|
// Prevent deletion down below.
|
||||||
ref[p] = d.lastRefresh[p]
|
ref[p] = d.lastRefresh[p]
|
||||||
continue
|
continue
|
||||||
|
@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
|
||||||
for f, n := range d.lastRefresh {
|
for f, n := range d.lastRefresh {
|
||||||
m, ok := ref[f]
|
m, ok := ref[f]
|
||||||
if !ok || n > m {
|
if !ok || n > m {
|
||||||
level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f)
|
d.logger.Debug("file_sd refresh found file that should be removed", "file", f)
|
||||||
d.deleteTimestamp(f)
|
d.deleteTimestamp(f)
|
||||||
for i := m; i < n; i++ {
|
for i := m; i < n; i++ {
|
||||||
select {
|
select {
|
||||||
|
|
|
@ -17,12 +17,12 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
"golang.org/x/oauth2/google"
|
"golang.org/x/oauth2/google"
|
||||||
|
@ -129,10 +129,10 @@ type Discovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
||||||
func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||||
m, ok := metrics.(*gceMetrics)
|
m, ok := metrics.(*gceMetrics)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
return nil, errors.New("invalid discovery metrics type")
|
||||||
}
|
}
|
||||||
|
|
||||||
d := &Discovery{
|
d := &Discovery{
|
||||||
|
|
|
@ -15,12 +15,12 @@ package hetzner
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/hetznercloud/hcloud-go/v2/hcloud"
|
"github.com/hetznercloud/hcloud-go/v2/hcloud"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
@ -58,7 +58,7 @@ type hcloudDiscovery struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
|
||||||
func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
|
func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) {
|
||||||
d := &hcloudDiscovery{
|
d := &hcloudDiscovery{
|
||||||
port: conf.Port,
|
port: conf.Port,
|
||||||
}
|
}
|
||||||
|
|
|
@ -18,8 +18,8 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) {
|
||||||
cfg.HTTPClientConfig.BearerToken = hcloudTestToken
|
cfg.HTTPClientConfig.BearerToken = hcloudTestToken
|
||||||
cfg.hcloudEndpoint = suite.Mock.Endpoint()
|
cfg.hcloudEndpoint = suite.Mock.Endpoint()
|
||||||
|
|
||||||
d, err := newHcloudDiscovery(&cfg, log.NewNopLogger())
|
d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
targetGroups, err := d.refresh(context.Background())
|
targetGroups, err := d.refresh(context.Background())
|
||||||
|
|
|
@@ -17,9 +17,9 @@ import (
"context"
"errors"
"fmt"
+"log/slog"
"time"

-"github.com/go-kit/log"
"github.com/hetznercloud/hcloud-go/v2/hcloud"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
@@ -135,10 +135,10 @@ type Discovery struct {
}

// NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*hetznerMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}

r, err := newRefresher(conf, logger)
@@ -157,7 +157,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
), nil
}

-func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
+func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
switch conf.Role {
case HetznerRoleHcloud:
if conf.hcloudEndpoint == "" {
@@ -18,13 +18,13 @@ import (
"encoding/json"
"fmt"
"io"
+"log/slog"
"net"
"net/http"
"strconv"
"strings"
"time"

-"github.com/go-kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
@@ -51,7 +51,7 @@ type robotDiscovery struct {
}

// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) {
d := &robotDiscovery{
port: conf.Port,
endpoint: conf.robotEndpoint,
@@ -18,9 +18,9 @@ import (
"fmt"
"testing"

-"github.com/go-kit/log"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
+"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"
)

@@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) {
cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword}
cfg.robotEndpoint = suite.Mock.Endpoint()

-d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
+d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
require.NoError(t, err)

targetGroups, err := d.refresh(context.Background())
@@ -91,12 +91,11 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
cfg := DefaultSDConfig
cfg.robotEndpoint = suite.Mock.Endpoint()

-d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
+d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
require.NoError(t, err)

targetGroups, err := d.refresh(context.Background())
-require.Error(t, err)
-require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
+require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot")

require.Empty(t, targetGroups)
}
@@ -19,17 +19,18 @@ import (
"errors"
"fmt"
"io"
+"log/slog"
"net/http"
"net/url"
"strconv"
"strings"
"time"

-"github.com/go-kit/log"
"github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
+"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version"

"github.com/prometheus/prometheus/discovery"
@@ -40,8 +41,8 @@ import (
var (
// DefaultSDConfig is the default HTTP SD configuration.
DefaultSDConfig = SDConfig{
-RefreshInterval: model.Duration(60 * time.Second),
HTTPClientConfig: config.DefaultHTTPClientConfig,
+RefreshInterval: model.Duration(60 * time.Second),
}
userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
@@ -85,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
return err
}
if c.URL == "" {
-return fmt.Errorf("URL is missing")
+return errors.New("URL is missing")
}
parsedURL, err := url.Parse(c.URL)
if err != nil {
return err
}
if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
-return fmt.Errorf("URL scheme must be 'http' or 'https'")
+return errors.New("URL scheme must be 'http' or 'https'")
}
if parsedURL.Host == "" {
-return fmt.Errorf("host is missing in URL")
+return errors.New("host is missing in URL")
}
return c.HTTPClientConfig.Validate()
}
@@ -114,14 +115,14 @@ type Discovery struct {
}

// NewDiscovery returns a new HTTP discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*httpMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}

if logger == nil {
-logger = promslog.NewNopLogger()
}

client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...)
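The UnmarshalYAML hunk above only rewords how errors are constructed; the URL checks themselves are unchanged. A small sketch of what those checks enforce, with an illustrative helper name (validateHTTPSDURL is not the actual Prometheus function):

    package httpsdexample

    import (
        "errors"
        "net/url"
    )

    // validateHTTPSDURL reproduces the three checks performed on the http SD URL:
    // it must be present, use an http or https scheme, and include a host.
    func validateHTTPSDURL(raw string) error {
        if raw == "" {
            return errors.New("URL is missing")
        }
        u, err := url.Parse(raw)
        if err != nil {
            return err
        }
        if u.Scheme != "http" && u.Scheme != "https" {
            return errors.New("URL scheme must be 'http' or 'https'")
        }
        if u.Host == "" {
            return errors.New("host is missing in URL")
        }
        return nil
    }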
@@ -21,11 +21,11 @@ import (
"testing"
"time"

-"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
+"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require"

"github.com/prometheus/prometheus/discovery"
@@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) {
require.NoError(t, metrics.Register())
defer metrics.Unregister()

-d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err)

ctx := context.Background()
@@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) {
require.NoError(t, metrics.Register())
defer metrics.Unregister()

-d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err)

ctx := context.Background()
@@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) {
require.NoError(t, metrics.Register())
defer metrics.Unregister()

-d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err)

ctx := context.Background()
@@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) {
require.NoError(t, metrics.Register())
defer metrics.Unregister()

-d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err)
for _, test := range cases {
ctx := context.Background()
@@ -15,10 +15,9 @@ package ionos

import (
"errors"
-"fmt"
+"log/slog"
"time"

-"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -43,10 +42,10 @@ func init() {
type Discovery struct{}

// NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ionosMetrics)
if !ok {
-return nil, fmt.Errorf("invalid discovery metrics type")
+return nil, errors.New("invalid discovery metrics type")
}

if conf.ionosEndpoint == "" {
@@ -16,13 +16,13 @@ package ionos
import (
"context"
"fmt"
+"log/slog"
"net"
"net/http"
"strconv"
"strings"
"time"

-"github.com/go-kit/log"
ionoscloud "github.com/ionos-cloud/sdk-go/v6"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
@@ -60,7 +60,7 @@ type serverDiscovery struct {
datacenterID string
}

-func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) {
d := &serverDiscovery{
port: conf.Port,
datacenterID: conf.DatacenterID,
@@ -17,13 +17,13 @@ import (
"context"
"errors"
"fmt"
+"log/slog"
"net"
"strconv"

-"github.com/go-kit/log"
-"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
+"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@@ -33,7 +33,7 @@ import (

// Endpoints discovers new endpoint targets.
type Endpoints struct {
-logger log.Logger
+logger *slog.Logger

endpointsInf cache.SharedIndexInformer
serviceInf cache.SharedInformer
@@ -49,9 +49,9 @@ type Endpoints struct {
}

// NewEndpoints returns a new endpoints discovery.
-func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
+func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
if l == nil {
-l = log.NewNopLogger()
+l = promslog.NewNopLogger()
}

epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd)
@@ -92,26 +92,23 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err)
+l.Error("Error adding endpoints event handler.", "err", err)
}

serviceUpdate := func(o interface{}) {
svc, err := convertToService(o)
if err != nil {
-level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
+e.logger.Error("converting to Service object failed", "err", err)
return
}

-ep := &apiv1.Endpoints{}
-ep.Namespace = svc.Namespace
-ep.Name = svc.Name
-obj, exists, err := e.endpointsStore.Get(ep)
+obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name))
if exists && err == nil {
e.enqueue(obj.(*apiv1.Endpoints))
}

if err != nil {
-level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
+e.logger.Error("retrieving endpoints failed", "err", err)
}
}
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -131,7 +128,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+l.Error("Error adding services event handler.", "err", err)
}
_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) {
@@ -154,7 +151,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+l.Error("Error adding pods event handler.", "err", err)
}
if e.withNodeMetadata {
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -167,12 +164,15 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
e.enqueueNode(node.Name)
},
DeleteFunc: func(o interface{}) {
-node := o.(*apiv1.Node)
-e.enqueueNode(node.Name)
+nodeName, err := nodeName(o)
+if err != nil {
+l.Error("Error getting Node name", "err", err)
+}
+e.enqueueNode(nodeName)
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+l.Error("Error adding nodes event handler.", "err", err)
}
}

@@ -182,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
func (e *Endpoints) enqueueNode(nodeName string) {
endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName)
if err != nil {
-level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
return
}

@@ -194,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) {
func (e *Endpoints) enqueuePod(podNamespacedName string) {
endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName)
if err != nil {
-level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
+e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
return
}

@@ -223,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {

if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
if !errors.Is(ctx.Err(), context.Canceled) {
-level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache")
+e.logger.Error("endpoints informer unable to sync cache")
}
return
}
@@ -247,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)

namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
-level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
+e.logger.Error("splitting key failed", "key", key)
return true
}

o, exists, err := e.endpointsStore.GetByKey(key)
if err != nil {
-level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
+e.logger.Error("getting object from store failed", "key", key)
return true
}
if !exists {
@@ -262,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
}
eps, err := convertToEndpoints(o)
if err != nil {
-level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err)
+e.logger.Error("converting to Endpoints object failed", "err", err)
return true
}
send(ctx, ch, e.buildEndpoints(eps))
@@ -361,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
target = target.Merge(podLabels(pod))

// Attach potential container port labels matching the endpoint port.
-for _, c := range pod.Spec.Containers {
+containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
+for i, c := range containers {
for _, cport := range c.Ports {
if port.Port == cport.ContainerPort {
ports := strconv.FormatUint(uint64(port.Port), 10)
+isInit := i >= len(pod.Spec.Containers)

target[podContainerNameLabel] = lv(c.Name)
target[podContainerImageLabel] = lv(c.Image)
target[podContainerPortNameLabel] = lv(cport.Name)
target[podContainerPortNumberLabel] = lv(ports)
target[podContainerPortProtocolLabel] = lv(string(port.Protocol))
+target[podContainerIsInit] = lv(strconv.FormatBool(isInit))
break
}
}
@@ -397,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {

v := eps.Labels[apiv1.EndpointsOverCapacity]
if v == "truncated" {
-level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
}
if v == "warning" {
-level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
}

// For all seen pods, check all container ports. If they were not covered
@@ -411,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
continue
}

-for _, c := range pe.pod.Spec.Containers {
+containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...)
+for i, c := range containers {
for _, cport := range c.Ports {
hasSeenPort := func() bool {
for _, eport := range pe.servicePorts {
@@ -428,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

+isInit := i >= len(pe.pod.Spec.Containers)
target := model.LabelSet{
model.AddressLabel: lv(a),
podContainerNameLabel: lv(c.Name),
@@ -435,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
podContainerPortNameLabel: lv(cport.Name),
podContainerPortNumberLabel: lv(ports),
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+podContainerIsInit: lv(strconv.FormatBool(isInit)),
}
tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
}
@@ -448,13 +454,10 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" {
return nil
}
-p := &apiv1.Pod{}
-p.Namespace = ref.Namespace
-p.Name = ref.Name

-obj, exists, err := e.podStore.Get(p)
+obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
if err != nil {
-level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
+e.logger.Error("resolving pod ref failed", "err", err)
return nil
}
if !exists {
@@ -464,31 +467,27 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
}

func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
-svc := &apiv1.Service{}
-svc.Namespace = ns
-svc.Name = name

-obj, exists, err := e.serviceStore.Get(svc)
+obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
if err != nil {
-level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
+e.logger.Error("retrieving service failed", "err", err)
return
}
if !exists {
return
}
-svc = obj.(*apiv1.Service)
+svc := obj.(*apiv1.Service)

tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}

-func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet {
+func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet {
if nodeName == nil {
return tg
}

obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName)
if err != nil {
-level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err)
+logger.Error("Error getting node", "node", *nodeName, "err", err)
return tg
}

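The buildEndpoints hunks above add init (sidecar) containers to the container scan and derive a new __meta_kubernetes_pod_container_init label from the index. A minimal sketch of that computation, assuming only the Kubernetes core/v1 Pod type and an illustrative helper name (initFlags is not part of Prometheus):

    package k8ssdexample

    import apiv1 "k8s.io/api/core/v1"

    // initFlags mirrors the isInit computation: regular and init containers are
    // walked as one slice, and any index at or past len(pod.Spec.Containers)
    // marks an init container, which becomes "__meta_kubernetes_pod_container_init": "true".
    func initFlags(pod *apiv1.Pod) map[string]bool {
        containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
        flags := make(map[string]bool, len(containers))
        for i, c := range containers {
            flags[c.Name] = i >= len(pod.Spec.Containers)
        }
        return flags
    }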
@@ -18,10 +18,12 @@ import (
"testing"

"github.com/prometheus/common/model"
+"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+"k8s.io/client-go/tools/cache"

"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -244,6 +246,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_uid": "deadbeef",
+"__meta_kubernetes_pod_container_init": "false",
},
{
"__address__": "1.2.3.4:9001",
@@ -259,6 +262,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
"__meta_kubernetes_pod_container_port_number": "9001",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_uid": "deadbeef",
+"__meta_kubernetes_pod_container_init": "false",
},
},
Labels: model.LabelSet{
@@ -821,6 +825,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_uid": "deadbeef",
+"__meta_kubernetes_pod_container_init": "false",
},
},
Labels: model.LabelSet{
@@ -1078,6 +1083,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
"__meta_kubernetes_pod_container_port_number": "9000",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_uid": "deadbeef",
+"__meta_kubernetes_pod_container_init": "false",
},
},
Labels: model.LabelSet{
@@ -1089,3 +1095,186 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
},
}.Run(t)
}
|
|
||||||
|
func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
|
||||||
|
objs := []runtime.Object{
|
||||||
|
&v1.Endpoints{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "testsidecar",
|
||||||
|
Namespace: "default",
|
||||||
|
},
|
||||||
|
Subsets: []v1.EndpointSubset{
|
||||||
|
{
|
||||||
|
Addresses: []v1.EndpointAddress{
|
||||||
|
{
|
||||||
|
IP: "4.3.2.1",
|
||||||
|
TargetRef: &v1.ObjectReference{
|
||||||
|
Kind: "Pod",
|
||||||
|
Name: "testpod",
|
||||||
|
Namespace: "default",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Ports: []v1.EndpointPort{
|
||||||
|
{
|
||||||
|
Name: "testport",
|
||||||
|
Port: 9000,
|
||||||
|
Protocol: v1.ProtocolTCP,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "initport",
|
||||||
|
Port: 9111,
|
||||||
|
Protocol: v1.ProtocolTCP,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
&v1.Pod{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: "testpod",
|
||||||
|
Namespace: "default",
|
||||||
|
UID: types.UID("deadbeef"),
|
||||||
|
},
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
NodeName: "testnode",
|
||||||
|
InitContainers: []v1.Container{
|
||||||
|
{
|
||||||
|
Name: "ic1",
|
||||||
|
Image: "ic1:latest",
|
||||||
|
Ports: []v1.ContainerPort{
|
||||||
|
{
|
||||||
|
Name: "initport",
|
||||||
|
ContainerPort: 1111,
|
||||||
|
Protocol: v1.ProtocolTCP,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "ic2",
|
||||||
|
Image: "ic2:latest",
|
||||||
|
Ports: []v1.ContainerPort{
|
||||||
|
{
|
||||||
|
Name: "initport",
|
||||||
|
ContainerPort: 9111,
|
||||||
|
Protocol: v1.ProtocolTCP,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Containers: []v1.Container{
|
||||||
|
{
|
||||||
|
Name: "c1",
|
||||||
|
Image: "c1:latest",
|
||||||
|
Ports: []v1.ContainerPort{
|
||||||
|
{
|
||||||
|
Name: "mainport",
|
||||||
|
ContainerPort: 9000,
|
||||||
|
Protocol: v1.ProtocolTCP,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Status: v1.PodStatus{
|
||||||
|
HostIP: "2.3.4.5",
|
||||||
|
PodIP: "4.3.2.1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...)
|
||||||
|
|
||||||
|
k8sDiscoveryTest{
|
||||||
|
discovery: n,
|
||||||
|
expectedMaxItems: 1,
|
||||||
|
expectedRes: map[string]*targetgroup.Group{
|
||||||
|
"endpoints/default/testsidecar": {
|
||||||
|
Targets: []model.LabelSet{
|
||||||
|
{
|
||||||
|
"__address__": "4.3.2.1:9000",
|
||||||
|
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
|
||||||
|
"__meta_kubernetes_endpoint_address_target_name": "testpod",
|
||||||
|
"__meta_kubernetes_endpoint_port_name": "testport",
|
||||||
|
"__meta_kubernetes_endpoint_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_endpoint_ready": "true",
|
||||||
|
"__meta_kubernetes_pod_container_image": "c1:latest",
|
||||||
|
"__meta_kubernetes_pod_container_name": "c1",
|
||||||
|
"__meta_kubernetes_pod_container_port_name": "mainport",
|
||||||
|
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||||
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
|
||||||
|
"__meta_kubernetes_pod_ip": "4.3.2.1",
|
||||||
|
"__meta_kubernetes_pod_name": "testpod",
|
||||||
|
"__meta_kubernetes_pod_node_name": "testnode",
|
||||||
|
"__meta_kubernetes_pod_phase": "",
|
||||||
|
"__meta_kubernetes_pod_ready": "unknown",
|
||||||
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "false",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.3.2.1:9111",
|
||||||
|
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
|
||||||
|
"__meta_kubernetes_endpoint_address_target_name": "testpod",
|
||||||
|
"__meta_kubernetes_endpoint_port_name": "initport",
|
||||||
|
"__meta_kubernetes_endpoint_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_endpoint_ready": "true",
|
||||||
|
"__meta_kubernetes_pod_container_image": "ic2:latest",
|
||||||
|
"__meta_kubernetes_pod_container_name": "ic2",
|
||||||
|
"__meta_kubernetes_pod_container_port_name": "initport",
|
||||||
|
"__meta_kubernetes_pod_container_port_number": "9111",
|
||||||
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
|
||||||
|
"__meta_kubernetes_pod_ip": "4.3.2.1",
|
||||||
|
"__meta_kubernetes_pod_name": "testpod",
|
||||||
|
"__meta_kubernetes_pod_node_name": "testnode",
|
||||||
|
"__meta_kubernetes_pod_phase": "",
|
||||||
|
"__meta_kubernetes_pod_ready": "unknown",
|
||||||
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "true",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"__address__": "4.3.2.1:1111",
|
||||||
|
"__meta_kubernetes_pod_container_image": "ic1:latest",
|
||||||
|
"__meta_kubernetes_pod_container_name": "ic1",
|
||||||
|
"__meta_kubernetes_pod_container_port_name": "initport",
|
||||||
|
"__meta_kubernetes_pod_container_port_number": "1111",
|
||||||
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
|
"__meta_kubernetes_pod_host_ip": "2.3.4.5",
|
||||||
|
"__meta_kubernetes_pod_ip": "4.3.2.1",
|
||||||
|
"__meta_kubernetes_pod_name": "testpod",
|
||||||
|
"__meta_kubernetes_pod_node_name": "testnode",
|
||||||
|
"__meta_kubernetes_pod_phase": "",
|
||||||
|
"__meta_kubernetes_pod_ready": "unknown",
|
||||||
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "true",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Labels: model.LabelSet{
|
||||||
|
"__meta_kubernetes_endpoints_name": "testsidecar",
|
||||||
|
"__meta_kubernetes_namespace": "default",
|
||||||
|
},
|
||||||
|
Source: "endpoints/default/testsidecar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}.Run(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkResolvePodRef(b *testing.B) {
|
||||||
|
indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil)
|
||||||
|
e := &Endpoints{
|
||||||
|
podStore: indexer,
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ReportAllocs()
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
p := e.resolvePodRef(&v1.ObjectReference{
|
||||||
|
Kind: "Pod",
|
||||||
|
Name: "testpod",
|
||||||
|
Namespace: "foo",
|
||||||
|
})
|
||||||
|
require.Nil(b, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@@ -17,16 +17,15 @@ import (
"context"
"errors"
"fmt"
+"log/slog"
"net"
"strconv"

-"github.com/go-kit/log"
-"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
+"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
-"k8s.io/api/discovery/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"

@@ -36,7 +35,7 @@ import (

// EndpointSlice discovers new endpoint targets.
type EndpointSlice struct {
-logger log.Logger
+logger *slog.Logger

endpointSliceInf cache.SharedIndexInformer
serviceInf cache.SharedInformer
@@ -52,9 +51,9 @@ type EndpointSlice struct {
}

// NewEndpointSlice returns a new endpointslice discovery.
-func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
+func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
if l == nil {
-l = log.NewNopLogger()
+l = promslog.NewNopLogger()
}

epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd)
@@ -93,23 +92,23 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err)
+l.Error("Error adding endpoint slices event handler.", "err", err)
}

serviceUpdate := func(o interface{}) {
svc, err := convertToService(o)
if err != nil {
-level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
+e.logger.Error("converting to Service object failed", "err", err)
return
}

-// TODO(brancz): use cache.Indexer to index endpoints by
-// disv1beta1.LabelServiceName so this operation doesn't have to
-// iterate over all endpoint objects.
+// TODO(brancz): use cache.Indexer to index endpointslices by
+// LabelServiceName so this operation doesn't have to iterate over all
+// endpoint objects.
for _, obj := range e.endpointSliceStore.List() {
esa, err := e.getEndpointSliceAdaptor(obj)
if err != nil {
-level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
+e.logger.Error("converting to EndpointSlice object failed", "err", err)
continue
}
if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name {
@@ -132,7 +131,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+l.Error("Error adding services event handler.", "err", err)
}

if e.withNodeMetadata {
@@ -146,12 +145,15 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
e.enqueueNode(node.Name)
},
DeleteFunc: func(o interface{}) {
-node := o.(*apiv1.Node)
-e.enqueueNode(node.Name)
+nodeName, err := nodeName(o)
+if err != nil {
+l.Error("Error getting Node name", "err", err)
+}
+e.enqueueNode(nodeName)
},
})
if err != nil {
-level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+l.Error("Error adding nodes event handler.", "err", err)
}
}

@@ -161,7 +163,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
func (e *EndpointSlice) enqueueNode(nodeName string) {
endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName)
if err != nil {
-level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
return
}

@@ -189,7 +191,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
}
if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
if !errors.Is(ctx.Err(), context.Canceled) {
-level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
+e.logger.Error("endpointslice informer unable to sync cache")
}
return
}
@@ -213,13 +215,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr

namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
-level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
+e.logger.Error("splitting key failed", "key", key)
return true
}

o, exists, err := e.endpointSliceStore.GetByKey(key)
if err != nil {
-level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
+e.logger.Error("getting object from store failed", "key", key)
return true
}
if !exists {
@@ -229,7 +231,7 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr

esa, err := e.getEndpointSliceAdaptor(o)
if err != nil {
-level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
+e.logger.Error("converting to EndpointSlice object failed", "err", err)
return true
}

@@ -241,8 +243,6 @@ func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAda
switch endpointSlice := o.(type) {
case *v1.EndpointSlice:
return newEndpointSliceAdaptorFromV1(endpointSlice), nil
-case *v1beta1.EndpointSlice:
-return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil
default:
return nil, fmt.Errorf("received unexpected object: %v", o)
}
@@ -380,19 +380,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
target = target.Merge(podLabels(pod))

// Attach potential container port labels matching the endpoint port.
-for _, c := range pod.Spec.Containers {
+containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
+for i, c := range containers {
for _, cport := range c.Ports {
if port.port() == nil {
continue
}

if *port.port() == cport.ContainerPort {
ports := strconv.FormatUint(uint64(*port.port()), 10)
+isInit := i >= len(pod.Spec.Containers)

target[podContainerNameLabel] = lv(c.Name)
target[podContainerImageLabel] = lv(c.Image)
target[podContainerPortNameLabel] = lv(cport.Name)
target[podContainerPortNumberLabel] = lv(ports)
target[podContainerPortProtocolLabel] = lv(string(cport.Protocol))
+target[podContainerIsInit] = lv(strconv.FormatBool(isInit))
break
}
}
@@ -420,7 +424,8 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
continue
}

-for _, c := range pe.pod.Spec.Containers {
+containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...)
+for i, c := range containers {
for _, cport := range c.Ports {
hasSeenPort := func() bool {
for _, eport := range pe.servicePorts {
@@ -440,6 +445,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)

+isInit := i >= len(pe.pod.Spec.Containers)
target := model.LabelSet{
model.AddressLabel: lv(a),
podContainerNameLabel: lv(c.Name),
@@ -447,6 +453,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
podContainerPortNameLabel: lv(cport.Name),
podContainerPortNumberLabel: lv(ports),
podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+podContainerIsInit: lv(strconv.FormatBool(isInit)),
}
tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
}
@@ -460,13 +467,10 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
if ref == nil || ref.Kind != "Pod" {
return nil
}
-p := &apiv1.Pod{}
-p.Namespace = ref.Namespace
-p.Name = ref.Name

-obj, exists, err := e.podStore.Get(p)
+obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
if err != nil {
-level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
+e.logger.Error("resolving pod ref failed", "err", err)
return nil
}
if !exists {
@@ -477,27 +481,27 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {

func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) {
var (
-svc = &apiv1.Service{}
found bool
+name string
)
-svc.Namespace = esa.namespace()
+ns := esa.namespace()

// Every EndpointSlice object has the Service they belong to in the
// kubernetes.io/service-name label.
-svc.Name, found = esa.labels()[esa.labelServiceName()]
+name, found = esa.labels()[esa.labelServiceName()]
if !found {
return
}

-obj, exists, err := e.serviceStore.Get(svc)
+obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
if err != nil {
-level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
+e.logger.Error("retrieving service failed", "err", err)
return
}
if !exists {
return
}
-svc = obj.(*apiv1.Service)
+svc := obj.(*apiv1.Service)

tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}
@@ -16,7 +16,6 @@ package kubernetes
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
-"k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@ -109,59 +108,6 @@ func (e *endpointSliceAdaptorV1) labelServiceName() string {
|
||||||
return v1.LabelServiceName
|
return v1.LabelServiceName
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adaptor for k8s.io/api/discovery/v1beta1.
|
|
||||||
type endpointSliceAdaptorV1Beta1 struct {
|
|
||||||
endpointSlice *v1beta1.EndpointSlice
|
|
||||||
}
|
|
||||||
|
|
||||||
func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor {
|
|
||||||
return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) get() interface{} {
|
|
||||||
return e.endpointSlice
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta {
|
|
||||||
return e.endpointSlice.ObjectMeta
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) name() string {
|
|
||||||
return e.endpointSlice.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) namespace() string {
|
|
||||||
return e.endpointSlice.Namespace
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) addressType() string {
|
|
||||||
return string(e.endpointSlice.AddressType)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor {
|
|
||||||
eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints))
|
|
||||||
for i := 0; i < len(e.endpointSlice.Endpoints); i++ {
|
|
||||||
eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i]))
|
|
||||||
}
|
|
||||||
return eps
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor {
|
|
||||||
ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports))
|
|
||||||
for i := 0; i < len(e.endpointSlice.Ports); i++ {
|
|
||||||
ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i]))
|
|
||||||
}
|
|
||||||
return ports
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string {
|
|
||||||
return e.endpointSlice.Labels
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string {
|
|
||||||
return v1beta1.LabelServiceName
|
|
||||||
}
|
|
||||||
|
|
||||||
type endpointSliceEndpointAdaptorV1 struct {
|
type endpointSliceEndpointAdaptorV1 struct {
|
||||||
endpoint v1.Endpoint
|
endpoint v1.Endpoint
|
||||||
}
|
}
|
||||||
|
@@ -218,62 +164,6 @@ func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool {
 	return e.endpointConditions.Terminating
 }
 
-type endpointSliceEndpointAdaptorV1beta1 struct {
-	endpoint v1beta1.Endpoint
-}
-
-func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor {
-	return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint}
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string {
-	return e.endpoint.Addresses
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string {
-	return e.endpoint.Hostname
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string {
-	return e.endpoint.NodeName
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string {
-	return nil
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor {
-	return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions)
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference {
-	return e.endpoint.TargetRef
-}
-
-func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string {
-	return e.endpoint.Topology
-}
-
-type endpointSliceEndpointConditionsAdaptorV1beta1 struct {
-	endpointConditions v1beta1.EndpointConditions
-}
-
-func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor {
-	return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions}
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool {
-	return e.endpointConditions.Ready
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool {
-	return e.endpointConditions.Serving
-}
-
-func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool {
-	return e.endpointConditions.Terminating
-}
-
 type endpointSlicePortAdaptorV1 struct {
 	endpointPort v1.EndpointPort
 }
@@ -298,28 +188,3 @@ func (e *endpointSlicePortAdaptorV1) protocol() *string {
 func (e *endpointSlicePortAdaptorV1) appProtocol() *string {
 	return e.endpointPort.AppProtocol
 }
-
-type endpointSlicePortAdaptorV1beta1 struct {
-	endpointPort v1beta1.EndpointPort
-}
-
-func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor {
-	return &endpointSlicePortAdaptorV1beta1{endpointPort: port}
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) name() *string {
-	return e.endpointPort.Name
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) port() *int32 {
-	return e.endpointPort.Port
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) protocol() *string {
-	val := string(*e.endpointPort.Protocol)
-	return &val
-}
-
-func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string {
-	return e.endpointPort.AppProtocol
-}
@@ -18,7 +18,6 @@ import (
 
 	"github.com/stretchr/testify/require"
 	v1 "k8s.io/api/discovery/v1"
-	"k8s.io/api/discovery/v1beta1"
 )
 
 func Test_EndpointSliceAdaptor_v1(t *testing.T) {
@@ -48,31 +47,3 @@ func Test_EndpointSliceAdaptor_v1(t *testing.T) {
 		require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
 	}
 }
-
-func Test_EndpointSliceAdaptor_v1beta1(t *testing.T) {
-	endpointSlice := makeEndpointSliceV1beta1()
-	adaptor := newEndpointSliceAdaptorFromV1beta1(endpointSlice)
-
-	require.Equal(t, endpointSlice.ObjectMeta.Name, adaptor.name())
-	require.Equal(t, endpointSlice.ObjectMeta.Namespace, adaptor.namespace())
-	require.Equal(t, endpointSlice.AddressType, v1beta1.AddressType(adaptor.addressType()))
-	require.Equal(t, endpointSlice.Labels, adaptor.labels())
-	require.Equal(t, "testendpoints", endpointSlice.Labels[v1beta1.LabelServiceName])
-
-	for i, endpointAdaptor := range adaptor.endpoints() {
-		require.Equal(t, endpointSlice.Endpoints[i].Addresses, endpointAdaptor.addresses())
-		require.Equal(t, endpointSlice.Endpoints[i].Hostname, endpointAdaptor.hostname())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Ready, endpointAdaptor.conditions().ready())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Serving, endpointAdaptor.conditions().serving())
-		require.Equal(t, endpointSlice.Endpoints[i].Conditions.Terminating, endpointAdaptor.conditions().terminating())
-		require.Equal(t, endpointSlice.Endpoints[i].TargetRef, endpointAdaptor.targetRef())
-		require.Equal(t, endpointSlice.Endpoints[i].Topology, endpointAdaptor.topology())
-	}
-
-	for i, portAdaptor := range adaptor.ports() {
-		require.Equal(t, endpointSlice.Ports[i].Name, portAdaptor.name())
-		require.Equal(t, endpointSlice.Ports[i].Port, portAdaptor.port())
-		require.EqualValues(t, endpointSlice.Ports[i].Protocol, portAdaptor.protocol())
-		require.Equal(t, endpointSlice.Ports[i].AppProtocol, portAdaptor.appProtocol())
-	}
-}
Some files were not shown because too many files have changed in this diff.