Mirror of https://github.com/prometheus/prometheus.git

Merge branch 'main' into dimitar/ruler/unsupported-logger

Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>

Commit 237139c992

.github/dependabot.yml (vendored, 15 changes)

@@ -16,8 +16,23 @@ updates:
     directory: "/documentation/examples/remote_storage"
     schedule:
       interval: "monthly"
+  # New mantine-ui packages.
   - package-ecosystem: "npm"
     directory: "/web/ui"
+    labels:
+      - dependencies
+      - javascript
+      - mantine-ui
     schedule:
       interval: "monthly"
     open-pull-requests-limit: 20
+  # Old react-app packages.
+  - package-ecosystem: "npm"
+    directory: "/web/ui/react-app"
+    labels:
+      - dependencies
+      - javascript
+      - old-react-ui
+    schedule:
+      interval: "monthly"
+    open-pull-requests-limit: 20

.github/workflows/buf-lint.yml (vendored, 4 changes)

@@ -12,8 +12,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

.github/workflows/buf.yml (vendored, 4 changes)

@@ -12,8 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: bufbuild/buf-setup-action@a47c93e0b1648d5651a065437926377d060baa99 # v1.50.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1

.github/workflows/ci.yml (vendored, 60 changes)

@@ -13,12 +13,12 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_npm: true
-      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1 test-flags=""
+      - run: make GOOPTS=--tags=stringlabels GO_ONLY=1 SKIP_GOLANGCI_LINT=1
       - run: go test --tags=stringlabels ./tsdb/ -test.tsdb-isolation=false
       - run: make -C documentation/examples/remote_storage
       - run: make -C documentation/examples

@@ -29,11 +29,11 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
-      - run: GOARCH=386 go test ./cmd/prometheus
+      - run: GOARCH=386 go test ./...
       - uses: ./.github/promci/actions/check_proto
         with:
           version: "3.15.8"

@@ -48,7 +48,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""

@@ -62,8 +62,8 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.23-base

     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/setup_environment
         with:
           enable_go: false

@@ -79,8 +79,8 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           go-version: 1.23.x
       - run: |

@@ -96,7 +96,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest

@@ -121,8 +121,8 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/build
         with:
           promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"

@@ -146,8 +146,8 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/build
         with:
           parallelism: 12

@@ -169,9 +169,9 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           cache: false
           go-version: 1.23.x

@@ -182,20 +182,20 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Install Go
-        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
+        uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0
         with:
           go-version: 1.23.x
       - name: Install snmp_exporter/generator dependencies
         run: sudo apt-get update && sudo apt-get -y install libsnmp-dev
         if: github.repository == 'prometheus/snmp_exporter'
       - name: Lint
-        uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0
+        uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0
         with:
           args: --verbose
           # Make sure to sync this with Makefile.common and scripts/golangci-lint.yml.
-          version: v1.60.2
+          version: v1.63.4
   fuzzing:
     uses: ./.github/workflows/fuzzing.yml
     if: github.event_name == 'pull_request'

@@ -208,8 +208,8 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/publish_main
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -225,8 +225,8 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - uses: ./.github/promci/actions/publish_release
         with:
           docker_hub_login: ${{ secrets.docker_hub_login }}

@@ -240,14 +240,14 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
-      - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: prometheus/promci@c3c93a50d581b928af720f0134b2b2dad32a6c41 # v0.4.6
       - name: Install nodejs
-        uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
+        uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
-      - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
+      - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
         with:
           path: ~/.npm
           key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}

.github/workflows/codeql-analysis.yml (vendored, 8 changes)

@@ -24,15 +24,15 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2

       - name: Initialize CodeQL
-        uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
+        uses: github/codeql-action/init@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8
         with:
           languages: ${{ matrix.language }}

       - name: Autobuild
-        uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
+        uses: github/codeql-action/autobuild@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8

       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
+        uses: github/codeql-action/analyze@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8

.github/workflows/container_description.yml (vendored, 4 changes)

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub

@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name

.github/workflows/funcbench.yml (vendored, deleted, 61 lines)

@@ -1,61 +0,0 @@
-on:
-  repository_dispatch:
-    types: [funcbench_start]
-name: Funcbench Workflow
-permissions:
-  contents: read
-
-jobs:
-  run_funcbench:
-    name: Running funcbench
-    if: github.event.action == 'funcbench_start'
-    runs-on: ubuntu-latest
-    env:
-      AUTH_FILE: ${{ secrets.TEST_INFRA_PROVIDER_AUTH }}
-      BRANCH: ${{ github.event.client_payload.BRANCH }}
-      BENCH_FUNC_REGEX: ${{ github.event.client_payload.BENCH_FUNC_REGEX }}
-      PACKAGE_PATH: ${{ github.event.client_payload.PACKAGE_PATH }}
-      GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
-      GITHUB_ORG: prometheus
-      GITHUB_REPO: prometheus
-      GITHUB_STATUS_TARGET_URL: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}
-      LAST_COMMIT_SHA: ${{ github.event.client_payload.LAST_COMMIT_SHA }}
-      GKE_PROJECT_ID: macro-mile-203600
-      PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
-      PROVIDER: gke
-      ZONE: europe-west3-a
-    steps:
-      - name: Update status to pending
-        run: >-
-          curl -i -X POST
-          -H "Authorization: Bearer $GITHUB_TOKEN"
-          -H "Content-Type: application/json"
-          --data '{"state":"pending","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
-          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
-      - name: Prepare nodepool
-        uses: docker://prominfra/funcbench:master
-        with:
-          entrypoint: "docker_entrypoint"
-          args: make deploy
-      - name: Delete all resources
-        if: always()
-        uses: docker://prominfra/funcbench:master
-        with:
-          entrypoint: "docker_entrypoint"
-          args: make clean
-      - name: Update status to failure
-        if: failure()
-        run: >-
-          curl -i -X POST
-          -H "Authorization: Bearer $GITHUB_TOKEN"
-          -H "Content-Type: application/json"
-          --data '{"state":"failure","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
-          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"
-      - name: Update status to success
-        if: success()
-        run: >-
-          curl -i -X POST
-          -H "Authorization: Bearer $GITHUB_TOKEN"
-          -H "Content-Type: application/json"
-          --data '{"state":"success","context":"funcbench-status","target_url":"'$GITHUB_STATUS_TARGET_URL'"}'
-          "https://api.github.com/repos/$GITHUB_REPOSITORY/statuses/$LAST_COMMIT_SHA"

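The deleted workflow drove GitHub's commit-status API directly with curl, posting a JSON payload of the form `{"state":...,"context":"funcbench-status","target_url":...}`. For readers unfamiliar with that API, here is a minimal Go sketch of the same POST the "Update status to ..." steps performed. The endpoint and JSON fields are taken from the workflow above; the function names and error handling are invented for illustration.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// commitStatus mirrors the JSON payload the workflow sent with curl.
type commitStatus struct {
	State     string `json:"state"`
	Context   string `json:"context"`
	TargetURL string `json:"target_url"`
}

// setStatus posts a commit status, as the workflow's "Update status to ..."
// steps did. repo is "owner/name" and sha is the commit being reported on.
func setStatus(token, repo, sha, state, targetURL string) error {
	body, err := json.Marshal(commitStatus{State: state, Context: "funcbench-status", TargetURL: targetURL})
	if err != nil {
		return err
	}
	url := fmt.Sprintf("https://api.github.com/repos/%s/statuses/%s", repo, sha)
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("status API returned %s", resp.Status)
	}
	return nil
}

func main() {
	if err := setStatus(os.Getenv("GITHUB_TOKEN"), "prometheus/prometheus",
		os.Getenv("SHA"), "pending", "https://example.com/run"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```
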
.github/workflows/fuzzing.yml (vendored, 2 changes)

@@ -21,7 +21,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts

.github/workflows/prombench.yml (vendored, 2 changes)

@@ -15,6 +15,8 @@ env:
   PR_NUMBER: ${{ github.event.client_payload.PR_NUMBER }}
   PROVIDER: gke
   RELEASE: ${{ github.event.client_payload.RELEASE }}
+  BENCHMARK_VERSION: ${{ github.event.client_payload.BENCHMARK_VERSION }}
+  BENCHMARK_DIRECTORY: ${{ github.event.client_payload.BENCHMARK_DIRECTORY }}
   ZONE: europe-west3-a
 jobs:
   benchmark_start:

.github/workflows/repo_sync.yml (vendored, 2 changes)

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

.github/workflows/scorecards.yml (vendored, 6 changes)

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: "Checkout code"
-        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # tag=v4.2.2
         with:
           persist-credentials: false

@@ -37,7 +37,7 @@ jobs:
       # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
       # format to the repository Actions tab.
       - name: "Upload artifact"
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # tag=v4.4.0
+        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # tag=v4.6.0
         with:
           name: SARIF file
           path: results.sarif

@@ -45,6 +45,6 @@ jobs:

       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10
+        uses: github/codeql-action/upload-sarif@dd746615b3b9d728a6a37ca2045b68ca76d4841a # tag=v3.28.8
         with:
           sarif_file: results.sarif

.github/workflows/stale.yml (vendored, 2 changes)

@@ -11,7 +11,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
+      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           # opt out of defaults to avoid marking issues as stale and closing them

.golangci.yml

@@ -5,25 +5,28 @@ output:
   sort-results: true

 linters:
+  # Keep this list sorted alphabetically
   enable:
     - depguard
     - errorlint
+    - exptostd
     - gocritic
     - godot
     - gofumpt
     - goimports
+    - loggercheck
     - misspell
+    - nilnesserr
     - nolintlint
     - perfsprint
     - predeclared
     - revive
+    - sloglint
     - testifylint
     - unconvert
     - unused
     - usestdlibvars
     - whitespace
-    - loggercheck
-    - sloglint

 issues:
   max-issues-per-linter: 0

@@ -109,7 +112,7 @@ linters-settings:
     extra-rules: true
   perfsprint:
     # Optimizes `fmt.Errorf`.
-    errorf: false
+    errorf: true
   revive:
     # By default, revive will enable only the linting rules that are named in the configuration file.
     # So, it's needed to explicitly enable all required rules here.

@@ -123,6 +126,9 @@ linters-settings:
       - allowTypesBefore: "*testing.T,testing.TB"
       - name: context-keys-type
       - name: dot-imports
+      - name: early-return
+        arguments:
+          - "preserveScope"
       # A lot of false positives: incorrectly identifies channel draining as "empty code block".
       # See https://github.com/mgechev/revive/issues/386
       - name: empty-block

@@ -134,6 +140,8 @@ linters-settings:
       - name: exported
      - name: increment-decrement
       - name: indent-error-flow
+        arguments:
+          - "preserveScope"
       - name: package-comments
         # TODO(beorn7): Currently, we have a lot of missing package doc comments. Maybe we should have them.
         disabled: true

@@ -141,6 +149,8 @@ linters-settings:
       - name: receiver-naming
       - name: redefines-builtin-id
       - name: superfluous-else
+        arguments:
+          - "preserveScope"
       - name: time-naming
       - name: unexported-return
       - name: unreachable-code

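The `errorf: true` switch above asks perfsprint to also flag `fmt.Errorf` calls that perform no formatting. A small sketch of the kind of rewrite the linter suggests, based on perfsprint's documented behavior; the function names here are invented for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// Flagged by perfsprint when errorf is enabled: the format string contains
// no verbs, so fmt.Errorf does needless formatting work.
func openBad() error {
	return fmt.Errorf("config not found")
}

// The suggested replacement: a plain static error.
func openGood() error {
	return errors.New("config not found")
}

func main() {
	fmt.Println(openBad(), openGood())
}
```
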
.yamllint

@@ -1,7 +1,7 @@
 ---
 extends: default
 ignore: |
-  ui/react-app/node_modules
+  **/node_modules

 rules:
   braces:

CHANGELOG.md (141 changes)

@@ -2,33 +2,143 @@

## unreleased

* [ENHANCEMENT] promtool: Support linting of scrape interval, through lint option `too-long-scrape-interval`. #15719

## 3.1.0 / 2025-01-02

* [SECURITY] upgrade golang.org/x/crypto to address reported CVE-2024-45337. #15691
* [CHANGE] Notifier: Increment prometheus_notifications_errors_total by the number of affected alerts rather than per batch. #15428
* [CHANGE] API: list rules field "groupNextToken:omitempty" renamed to "groupNextToken". #15400
* [ENHANCEMENT] OTLP translate: keep identifying attributes in target_info. #15448
* [ENHANCEMENT] Paginate rule groups, add infinite scroll to rules within groups. #15677
* [ENHANCEMENT] TSDB: Improve calculation of space used by labels. #13880
* [ENHANCEMENT] Rules: new metric rule_group_last_rule_duration_sum_seconds. #15672
* [ENHANCEMENT] Observability: Export 'go_sync_mutex_wait_total_seconds_total' metric. #15339
* [ENHANCEMENT] Remote-Write: optionally use a DNS resolver that picks a random IP. #15329
* [PERF] Optimize `l=~".+"` matcher. #15474, #15684
* [PERF] TSDB: Cache all symbols for compaction. #15455
* [PERF] TSDB: MemPostings: keep a map of label values slices. #15426
* [PERF] Remote-Write: Remove interning hook. #15456
* [PERF] Scrape: optimize string manipulation for experimental native histograms with custom buckets. #15453
* [PERF] TSDB: reduce memory allocations. #15465, #15427
* [PERF] Storage: Implement limit in mergeGenericQuerier. #14489
* [PERF] TSDB: Optimize inverse matching. #14144
* [PERF] Regex: use stack memory for lowercase copy of string. #15210
* [PERF] TSDB: When deleting from postings index, pause to unlock and let readers read. #15242
* [BUGFIX] Main: Avoid possible segfault at exit. (#15724)
* [BUGFIX] Rules: Do not run rules concurrently if uncertain about dependencies. #15560
* [BUGFIX] PromQL: Adds test for `absent`, `absent_over_time` and `deriv` func with histograms. #15667
* [BUGFIX] PromQL: Fix various bugs related to quoting UTF-8 characters. #15531
* [BUGFIX] Scrape: fix nil panic after scrape loop reload. #15563
* [BUGFIX] Remote-write: fix panic on repeated log message. #15562
* [BUGFIX] Scrape: reload would ignore always_scrape_classic_histograms and convert_classic_histograms_to_nhcb configs. #15489
* [BUGFIX] TSDB: fix data corruption in experimental native histograms. #15482
* [BUGFIX] PromQL: Ignore histograms in all time related functions. #15479
* [BUGFIX] OTLP receiver: Convert metric metadata. #15416
* [BUGFIX] PromQL: Fix `resets` function for histograms. #15527
* [BUGFIX] PromQL: Fix behaviour of `changes()` for mix of histograms and floats. #15469
* [BUGFIX] PromQL: Fix behaviour of some aggregations with histograms. #15432
* [BUGFIX] Allow quoted exemplar keys in openmetrics text format. #15260
* [BUGFIX] TSDB: fixes for rare conditions when loading write-behind-log (WBL). #15380
* [BUGFIX] `round()` function did not remove `__name__` label. #15250
* [BUGFIX] Promtool: analyze block shows metric name with 0 cardinality. #15438
* [BUGFIX] PromQL: Fix `count_values` for histograms. #15422
* [BUGFIX] PromQL: fix issues with comparison binary operations with `bool` modifier and native histograms. #15413
* [BUGFIX] PromQL: fix incorrect "native histogram ignored in aggregation" annotations. #15414
* [BUGFIX] PromQL: Corrects the behaviour of some operator and aggregators with Native Histograms. #15245
* [BUGFIX] TSDB: Always return unknown hint for first sample in non-gauge histogram chunk. #15343
* [BUGFIX] PromQL: Clamp functions: Ignore any points with native histograms. #15169
* [BUGFIX] TSDB: Fix race on stale values in headAppender. #15322
* [BUGFIX] UI: Fix selector / series formatting for empty metric names. #15340
* [BUGFIX] OTLP receiver: Allow colons in non-standard units. #15710

## 3.0.1 / 2024-11-28

The first bug fix release for Prometheus 3.

* [BUGFIX] Promql: Make subqueries left open. #15431
* [BUGFIX] Fix memory leak when query log is enabled. #15434
* [BUGFIX] Support utf8 names on /v1/label/:name/values endpoint. #15399

## 3.0.0 / 2024-11-14

This release includes new features such as a brand new UI and UTF-8 support enabled by default. As this marks the first new major version in seven years, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. For users that want to upgrade we recommend to read through our [migration guide](https://prometheus.io/docs/prometheus/3.0/migration/).

* [CHANGE] Set the `GOMAXPROCS` variable automatically to match the Linux CPU quota. Use `--no-auto-gomaxprocs` to disable it. The `auto-gomaxprocs` feature flag was removed. #15376
* [CHANGE] Set the `GOMEMLIMIT` variable automatically to match the Linux container memory limit. Use `--no-auto-gomemlimit` to disable it. The `auto-gomemlimit` feature flag was removed. #15373
* [CHANGE] Scraping: Remove implicit fallback to the Prometheus text format in case of invalid/missing Content-Type and fail the scrape instead. Add ability to specify a `fallback_scrape_protocol` in the scrape config. #15136
* [CHANGE] Remote-write: default enable_http2 to false. #15219
* [CHANGE] Scraping: normalize "le" and "quantile" label values upon ingestion. #15164
* [CHANGE] Scraping: config `scrape_classic_histograms` was renamed to `always_scrape_classic_histograms`. #15178
* [CHANGE] Config: remove expand-external-labels flag, expand external labels env vars by default. #14657
* [CHANGE] Disallow configuring AM with the v1 api. #13883
* [CHANGE] regexp `.` now matches all characters (performance improvement). #14505
* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894
* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910

## 3.0.0-beta.0 / 2024-09-05

Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.

As is traditional with a beta release, we do **not** recommend users install 3.0.0-beta on critical production systems, but we do want everyone to test it out and find bugs.

* [CHANGE] Prometheus will not add or remove port numbers from the target address. `no-default-scrape-port` feature flag removed. #14160
* [CHANGE] Logging: the format of log lines has changed a little, along with the adoption of Go's Structured Logging package. #14906
* [CHANGE] Don't create extra `_created` timeseries if feature-flag `created-timestamp-zero-ingestion` is enabled. #14738
* [CHANGE] Float literals and time durations being the same is now a stable feature. #15111
* [CHANGE] UI: The old web UI has been replaced by a completely new one that is less cluttered and adds a few new features (PromLens-style tree view, better metrics explorer, "Explain" tab). However, it is still missing some features of the old UI (notably, exemplar display and heatmaps). To switch back to the old UI, you can use the feature flag `--enable-feature=old-ui` for the time being. #14872
* [CHANGE] PromQL: Range selectors and the lookback delta are now left-open, i.e. a sample coinciding with the lower time limit is excluded rather than included. #13904
* [CHANGE] Kubernetes SD: Remove support for `discovery.k8s.io/v1beta1` API version of EndpointSlice. This version is no longer served as of Kubernetes v1.25. #14365
* [CHANGE] Kubernetes SD: Remove support for `networking.k8s.io/v1beta1` API version of Ingress. This version is no longer served as of Kubernetes v1.22. #14365
-* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705
+* [CHANGE] UTF-8: Enable UTF-8 support by default. Prometheus now allows all UTF-8 characters in metric and label names. The corresponding `utf8-name` feature flag has been removed. #14705, #15258
* [CHANGE] Console: Remove example files for the console feature. Users can continue using the console feature by supplying their own JavaScript and templates. #14807
* [CHANGE] SD: Enable the new service discovery manager by default. This SD manager does not restart unchanged discoveries upon reloading. This makes reloads faster and reduces pressure on service discoveries' sources. The corresponding `new-service-discovery-manager` feature flag has been removed. #14770
* [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
* [CHANGE] Remove deprecated `remote-write-receiver`, `promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
* [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
* [FEATURE] OTLP receiver: Ability to skip UTF-8 normalization using `otlp.translation_strategy = NoUTF8EscapingWithSuffixes` configuration option. #15384
* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769, #15011
* [ENHANCEMENT] Scraping, rules: handle targets reappearing, or rules moving group, when out-of-order is enabled. #14710
* [ENHANCEMENT] Tools: add debug printouts to promtool rules unit testing. #15196
* [ENHANCEMENT] Scraping: support Created-Timestamp feature on native histograms. #14694
* [ENHANCEMENT] UI: Many fixes and improvements. #14898, #14899, #14907, #14908, #14912, #14913, #14914, #14931, #14940, #14945, #14946, #14972, #14981, #14982, #14994, #15096
* [ENHANCEMENT] UI: Web UI now displays notifications, e.g. when starting up and shutting down. #15082
* [ENHANCEMENT] PromQL: Introduce exponential interpolation for native histograms. #14677
* [ENHANCEMENT] TSDB: Add support for ingestion of out-of-order native histogram samples. #14850, #14546
* [ENHANCEMENT] Alerts: remove metrics for removed Alertmanagers. #13909
* [ENHANCEMENT] Kubernetes SD: Support sidecar containers in endpoint discovery. #14929
* [ENHANCEMENT] Consul SD: Support catalog filters. #11224
* [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
* [PERF] TSDB: Parallelize deletion of postings after head compaction. #14975
* [PERF] TSDB: Chunk encoding: shorten some write sequences. #14932
* [PERF] TSDB: Grow postings by doubling. #14721
* [PERF] Relabeling: Optimize adding a constant label pair. #12180
* [BUGFIX] Scraping: Don't log errors on empty scrapes. #15357
* [BUGFIX] UI: fix selector / series formatting for empty metric names. #15341
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to always ignore native histograms. #14941
* [BUGFIX] PromQL: Fix stddev+stdvar aggregations to treat Infinity consistently. #14941
* [BUGFIX] OTLP receiver: Preserve colons when generating metric names in suffix adding mode (this mode is always enabled, unless one uses Prometheus as a library). #15251
* [BUGFIX] Scraping: Unit was missing when using protobuf format. #15095
* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
* [BUGFIX] TSDB: Chunks could have one unnecessary zero byte at the end. #14854
* [BUGFIX] "superfluous response.WriteHeader call" messages in log. #14884
* [BUGFIX] PromQL: Unary negation of native histograms. #14821
* [BUGFIX] PromQL: Handle stale marker in native histogram series (e.g. if series goes away and comes back). #15025
* [BUGFIX] Autoreload: Reload invalid yaml files. #14947
* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029

-## 2.55.0-rc.0 / 2024-09-20
+## 2.53.3 / 2024-11-04

* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685, #14740

## 2.53.2 / 2024-08-09

Fix a bug where Prometheus would crash with a segmentation fault if a remote-read
request accessed a block on disk at about the same time as TSDB created a new block.

* [BUGFIX] Remote-Read: Resolve occasional segmentation fault on query. #14515, #14523

## 2.55.1 / 2024-11-04

* [BUGFIX] `round()` function did not remove `__name__` label. #15250

## 2.55.0 / 2024-10-22

* [FEATURE] PromQL: Add experimental `info` function. #14495
* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734

@@ -36,19 +146,21 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
* [FEATURE] TSDB: Add `delayed-compaction` feature flag, for people running many Prometheus to randomize timing. #12532
* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
-* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655, #14985
* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
* [ENHANCEMENT] API: Support multiple listening addresses. #14665
* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
-* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948, #15120
* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
* [BUGFIX] PromQL: make sort_by_label stable. #14985
* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810

@@ -59,7 +171,7 @@ As is traditional with a beta release, we do **not** recommend users install 3.0

## 2.54.1 / 2024-08-27

-* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps. #14685
+* [BUGFIX] Scraping: allow multiple samples on same series, with explicit timestamps (mixing samples of the same series with and without timestamps is still rejected). #14685
* [BUGFIX] Docker SD: fix crash in `match_first_network` mode when container is reconnected to a new network. #14654
* [BUGFIX] PromQL: fix experimental native histograms getting corrupted due to vector selector bug in range queries. #14538
* [BUGFIX] PromQL: fix experimental native histogram counter reset detection on stale samples. #14514

@@ -141,6 +253,7 @@ This release changes the default for GOGC, the Go runtime control for the trade-
## 2.52.0 / 2024-05-07

* [CHANGE] TSDB: Fix the predicate checking for blocks which are beyond the retention period to include the ones right at the retention boundary. #9633
+* [CHANGE] Scrape: Multiple samples (even with different timestamps) are treated as duplicates during one scrape.
* [FEATURE] Kubernetes SD: Add a new metric `prometheus_sd_kubernetes_failures_total` to track failed requests to Kubernetes API. #13554
* [FEATURE] Kubernetes SD: Add node and zone metadata labels when using the endpointslice role. #13935
* [FEATURE] Azure SD/Remote Write: Allow usage of Azure authorization SDK. #13099

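One of the changelog entries above is algorithmic rather than narrative: 2.55.0's "[ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413". For readers unfamiliar with the technique, here is a generic Go sketch of Kahan (compensated) summation; this illustrates the algorithm only, not Prometheus's actual implementation.

```go
package main

import "fmt"

// kahanSum accumulates floats while tracking the low-order bits that a
// naive running sum would discard, which is what makes avg/avg_over_time
// more accurate over long series of mixed magnitudes.
func kahanSum(values []float64) float64 {
	var sum, c float64
	for _, v := range values {
		y := v - c        // subtract the running compensation
		t := sum + y      // low-order bits of y may be lost here...
		c = (t - sum) - y // ...but are recovered into c for the next step
		sum = t
	}
	return sum
}

func main() {
	vals := []float64{1e16, 1, 1, -1e16} // true sum is exactly 2
	naive := 0.0
	for _, v := range vals {
		naive += v // rounding error accumulates at the 1e16 magnitude
	}
	fmt.Println(naive, kahanSum(vals)) // Kahan returns the exact 2
}
```
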
Dockerfile (1 change)

@@ -2,6 +2,7 @@ ARG ARCH="amd64"
 ARG OS="linux"
 FROM quay.io/prometheus/busybox-${OS}-${ARCH}:latest
 LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
+LABEL org.opencontainers.image.source="https://github.com/prometheus/prometheus"

 ARG ARCH="amd64"
 ARG OS="linux"

Makefile.common (1 change)

@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.60.2
+GOLANGCI_LINT_VERSION ?= v1.63.4
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))

README.md (19 changes)

@@ -67,7 +67,7 @@ Prometheus will now be reachable at <http://localhost:9090/>.

 To build Prometheus from source code, you need:

-* Go [version 1.17 or greater](https://golang.org/doc/install).
+* Go [version 1.22 or greater](https://golang.org/doc/install).
 * NodeJS [version 16 or greater](https://nodejs.org/).
 * npm [version 7 or greater](https://www.npmjs.com/).

@@ -158,8 +158,19 @@ This is experimental.
 ### Prometheus code base

 In order to comply with [go mod](https://go.dev/ref/mod#versions) rules,
-Prometheus release numbers do not exactly match Go module releases. For the
-Prometheus v2.y.z releases, we are publishing equivalent v0.y.z tags.
+Prometheus release numbers do not exactly match Go module releases.
+
+For the
+Prometheus v3.y.z releases, we are publishing equivalent v0.3y.z tags. The y in v0.3y.z is always padded to two digits, with a leading zero if needed.
+
+Therefore, a user that would want to use Prometheus v3.0.0 as a library could do:
+
+```shell
+go get github.com/prometheus/prometheus@v0.300.0
+```
+
+For the
+Prometheus v2.y.z releases, we published the equivalent v0.y.z tags.

 Therefore, a user that would want to use Prometheus v2.35.0 as a library could do:

@@ -177,7 +188,7 @@ For more information on building, running, and developing on the React-based UI

 ## More information

-* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v2.x.y will be displayed as v0.x.y.
+* Godoc documentation is available via [pkg.go.dev](https://pkg.go.dev/github.com/prometheus/prometheus). Due to peculiarities of Go Modules, v3.y.z will be displayed as v0.3y.z (the y in v0.3y.z is always padded to two digits, with a leading zero if needed), while v2.y.z will be displayed as v0.y.z.
 * See the [Community page](https://prometheus.io/community) for how to reach the Prometheus developers and users on various communication channels.

 ## Contributing

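The v3.y.z to v0.3y.z tag mapping added to the README above is mechanical, so a small illustrative Go helper (not part of the repository) makes the padding rule concrete:

```go
package main

import "fmt"

// moduleTag converts a Prometheus v3 release number into the Go module tag
// the README describes: v3.y.z maps to v0.3y.z with y zero-padded to two
// digits. For example, v3.0.0 becomes v0.300.0 and v3.1.0 becomes v0.301.0.
func moduleTag(minor, patch int) string {
	return fmt.Sprintf("v0.3%02d.%d", minor, patch)
}

func main() {
	fmt.Println(moduleTag(0, 0))  // v0.300.0, as in the README's example
	fmt.Println(moduleTag(1, 0))  // v0.301.0
	fmt.Println(moduleTag(12, 4)) // v0.312.4
}
```
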
RELEASE.md (64 changes)

@@ -5,61 +5,15 @@ This page describes the release process and the currently planned schedule for u
 ## Release schedule

 Release cadence of first pre-releases being cut is 6 weeks.
+Please see [the v2.55 RELEASE.md](https://github.com/prometheus/prometheus/blob/release-2.55/RELEASE.md) for the v2 release series schedule.

-| release series | date of first pre-release (year-month-day) | release shepherd                            |
-|----------------|--------------------------------------------|---------------------------------------------|
-| v2.4           | 2018-09-06                                 | Goutham Veeramachaneni (GitHub: @gouthamve) |
-| v2.5           | 2018-10-24                                 | Frederic Branczyk (GitHub: @brancz)         |
-| v2.6           | 2018-12-05                                 | Simon Pasquier (GitHub: @simonpasquier)     |
-| v2.7           | 2019-01-16                                 | Goutham Veeramachaneni (GitHub: @gouthamve) |
-| v2.8           | 2019-02-27                                 | Ganesh Vernekar (GitHub: @codesome)         |
-| v2.9           | 2019-04-10                                 | Brian Brazil (GitHub: @brian-brazil)        |
-| v2.10          | 2019-05-22                                 | Björn Rabenstein (GitHub: @beorn7)          |
-| v2.11          | 2019-07-03                                 | Frederic Branczyk (GitHub: @brancz)         |
-| v2.12          | 2019-08-14                                 | Julius Volz (GitHub: @juliusv)              |
-| v2.13          | 2019-09-25                                 | Krasi Georgiev (GitHub: @krasi-georgiev)    |
-| v2.14          | 2019-11-06                                 | Chris Marchbanks (GitHub: @csmarchbanks)    |
-| v2.15          | 2019-12-18                                 | Bartek Plotka (GitHub: @bwplotka)           |
-| v2.16          | 2020-01-29                                 | Callum Styan (GitHub: @cstyan)              |
-| v2.17          | 2020-03-11                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.18          | 2020-04-22                                 | Bartek Plotka (GitHub: @bwplotka)           |
-| v2.19          | 2020-06-03                                 | Ganesh Vernekar (GitHub: @codesome)         |
-| v2.20          | 2020-07-15                                 | Björn Rabenstein (GitHub: @beorn7)          |
-| v2.21          | 2020-08-26                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.22          | 2020-10-07                                 | Frederic Branczyk (GitHub: @brancz)         |
-| v2.23          | 2020-11-18                                 | Ganesh Vernekar (GitHub: @codesome)         |
-| v2.24          | 2020-12-30                                 | Björn Rabenstein (GitHub: @beorn7)          |
-| v2.25          | 2021-02-10                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.26          | 2021-03-24                                 | Bartek Plotka (GitHub: @bwplotka)           |
-| v2.27          | 2021-05-05                                 | Chris Marchbanks (GitHub: @csmarchbanks)    |
-| v2.28          | 2021-06-16                                 | Julius Volz (GitHub: @juliusv)              |
-| v2.29          | 2021-07-28                                 | Frederic Branczyk (GitHub: @brancz)         |
-| v2.30          | 2021-09-08                                 | Ganesh Vernekar (GitHub: @codesome)         |
-| v2.31          | 2021-10-20                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.32          | 2021-12-01                                 | Julius Volz (GitHub: @juliusv)              |
-| v2.33          | 2022-01-12                                 | Björn Rabenstein (GitHub: @beorn7)          |
-| v2.34          | 2022-02-23                                 | Chris Marchbanks (GitHub: @csmarchbanks)    |
-| v2.35          | 2022-04-06                                 | Augustin Husson (GitHub: @nexucis)          |
-| v2.36          | 2022-05-18                                 | Matthias Loibl (GitHub: @metalmatze)        |
-| v2.37 LTS      | 2022-06-29                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.38          | 2022-08-10                                 | Julius Volz (GitHub: @juliusv)              |
-| v2.39          | 2022-09-21                                 | Ganesh Vernekar (GitHub: @codesome)         |
-| v2.40          | 2022-11-02                                 | Ganesh Vernekar (GitHub: @codesome)         |
-| v2.41          | 2022-12-14                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.42          | 2023-01-25                                 | Kemal Akkoyun (GitHub: @kakkoyun)           |
-| v2.43          | 2023-03-08                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.44          | 2023-04-19                                 | Bryan Boreham (GitHub: @bboreham)           |
-| v2.45 LTS      | 2023-05-31                                 | Jesus Vazquez (Github: @jesusvazquez)       |
-| v2.46          | 2023-07-12                                 | Julien Pivotto (GitHub: @roidelapluie)      |
-| v2.47          | 2023-08-23                                 | Bryan Boreham (GitHub: @bboreham)           |
-| v2.48          | 2023-10-04                                 | Levi Harrison (GitHub: @LeviHarrison)       |
-| v2.49          | 2023-12-05                                 | Bartek Plotka (GitHub: @bwplotka)           |
-| v2.50          | 2024-01-16                                 | Augustin Husson (GitHub: @nexucis)          |
-| v2.51          | 2024-03-07                                 | Bryan Boreham (GitHub: @bboreham)           |
-| v2.52          | 2024-04-22                                 | Arthur Silva Sens (GitHub: @ArthurSens)     |
-| v2.53 LTS      | 2024-06-03                                 | George Krajcsovits (GitHub: @krajorama)     |
-| v2.54          | 2024-07-17                                 | Bryan Boreham (GitHub: @bboreham)           |
-| v2.55          | 2024-09-17                                 | Bryan Boreham (GitHub: @bboreham)           |
+| release series | date of first pre-release (year-month-day) | release shepherd                  |
+|----------------|--------------------------------------------|-----------------------------------|
+| v3.0           | 2024-11-14                                 | Jan Fajerski (GitHub: @jan--f)    |
+| v3.1           | 2024-12-17                                 | Bryan Boreham (GitHub: @bboreham) |
+| v3.2           | 2025-01-28                                 | Jan Fajerski (GitHub: @jan--f)    |
+| v3.3           | 2025-03-11                                 | Ayoub Mrini (Github: @machine424) |
+| v3.4           | 2025-04-22                                 | **volunteer welcome**             |

 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.

@@ -204,7 +158,7 @@ Then release with `git tag-release`.

 Signing a tag with a GPG key is appreciated, but in case you can't add a GPG key to your Github account using the following [procedure](https://help.github.com/articles/generating-a-gpg-key/), you can replace the `-s` flag with the `-a` flag of the `git tag` command to only annotate the tag without signing.

-Once a tag is created, the release process through CircleCI will be triggered for this tag and Circle CI will draft the GitHub release using the `prombot` account.
+Once a tag is created, the release process through Github Actions will be triggered for this tag and Github Actions will draft the GitHub release using the `prombot` account.

 Finally, wait for the build step for the tag to finish. The point here is to wait for tarballs to be uploaded to the Github release and the container images to be pushed to the Docker Hub and Quay.io. Once that has happened, click _Publish release_, which will make the release publicly visible and create a GitHub notification.
 **Note:** for a release candidate version ensure the _This is a pre-release_ box is checked when drafting the release in the Github UI. The CI job should take care of this but it's a good idea to double check before clicking _Publish release_.

@ -27,6 +27,7 @@ import (
|
|||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
goregexp "regexp" //nolint:depguard // The Prometheus client library requires us to pass a regexp from this package.
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
|
@ -190,21 +191,20 @@ type flagConfig struct {
|
|||
queryConcurrency int
|
||||
queryMaxSamples int
|
||||
RemoteFlushDeadline model.Duration
|
||||
nameEscapingScheme string
|
||||
maxNotificationsSubscribers int
|
||||
|
||||
enableAutoReload bool
|
||||
autoReloadInterval model.Duration
|
||||
|
||||
featureList []string
|
||||
memlimitRatio float64
|
||||
maxprocsEnable bool
|
||||
memlimitEnable bool
|
||||
memlimitRatio float64
|
||||
|
||||
featureList []string
|
||||
// These options are extracted from featureList
|
||||
// for ease of use.
|
||||
enableExpandExternalLabels bool
|
||||
enablePerStepStats bool
|
||||
enableAutoGOMAXPROCS bool
|
||||
enableAutoGOMEMLIMIT bool
|
||||
enableConcurrentRuleEval bool
|
||||
enablePerStepStats bool
|
||||
enableConcurrentRuleEval bool
|
||||
|
||||
prometheusURL string
|
||||
corsRegexString string
|
||||
|
@ -220,9 +220,6 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
opts := strings.Split(f, ",")
|
||||
for _, o := range opts {
|
||||
switch o {
|
||||
case "expand-external-labels":
|
||||
c.enableExpandExternalLabels = true
|
||||
logger.Info("Experimental expand-external-labels enabled")
|
||||
case "exemplar-storage":
|
||||
c.tsdb.EnableExemplarStorage = true
|
||||
logger.Info("Experimental in-memory exemplar storage enabled")
|
||||
|
@ -238,18 +235,12 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
case "promql-per-step-stats":
|
||||
c.enablePerStepStats = true
|
||||
logger.Info("Experimental per-step statistics reporting")
|
||||
case "auto-gomaxprocs":
|
||||
c.enableAutoGOMAXPROCS = true
|
||||
logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota")
|
||||
case "auto-reload-config":
|
||||
c.enableAutoReload = true
|
||||
if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
|
||||
c.autoReloadInterval, _ = model.ParseDuration("1s")
|
||||
}
|
||||
logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
|
||||
case "auto-gomemlimit":
|
||||
c.enableAutoGOMEMLIMIT = true
|
||||
logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
|
||||
case "concurrent-rule-eval":
|
||||
c.enableConcurrentRuleEval = true
|
||||
logger.Info("Experimental concurrent rule evaluation enabled.")
|
||||
|
@ -268,6 +259,7 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
|
||||
case "created-timestamp-zero-ingestion":
|
||||
c.scrape.EnableCreatedTimestampZeroIngestion = true
|
||||
c.web.CTZeroIngestionEnabled = true
|
||||
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
|
||||
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
|
||||
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
|
||||
|
@ -283,6 +275,9 @@ func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
|
|||
case "old-ui":
|
||||
c.web.UseOldUI = true
|
||||
logger.Info("Serving previous version of the Prometheus web UI.")
|
||||
case "otlp-deltatocumulative":
|
||||
c.web.ConvertOTLPDelta = true
|
||||
logger.Info("Converting delta OTLP metrics to cumulative")
|
||||
default:
|
||||
logger.Warn("Unknown option for --enable-feature", "option", o)
|
||||
}
|
||||
|
@ -305,6 +300,7 @@ func main() {
|
|||
		collectors.WithGoCollectorRuntimeMetrics(
			collectors.MetricsGC,
			collectors.MetricsScheduler,
			collectors.GoRuntimeMetricsRule{Matcher: goregexp.MustCompile(`^/sync/mutex/wait/total:seconds$`)},
		),
	),
)

@@ -336,6 +332,10 @@ func main() {
 	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry. Can be repeated.").
 		Default("0.0.0.0:9090").StringsVar(&cfg.web.ListenAddresses)
 
+	a.Flag("auto-gomaxprocs", "Automatically set GOMAXPROCS to match Linux container CPU quota").
+		Default("true").BoolVar(&cfg.maxprocsEnable)
+	a.Flag("auto-gomemlimit", "Automatically set GOMEMLIMIT to match Linux container or system memory limit").
+		Default("true").BoolVar(&cfg.memlimitEnable)
 	a.Flag("auto-gomemlimit.ratio", "The ratio of reserved GOMEMLIMIT memory to the detected maximum container or system memory").
 		Default("0.9").FloatVar(&cfg.memlimitRatio)

@@ -437,6 +437,9 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.samples-per-chunk", "Target number of samples per chunk.").
 		Default("120").Hidden().IntVar(&cfg.tsdb.SamplesPerChunk)
 
+	serverOnlyFlag(a, "storage.tsdb.delayed-compaction.max-percent", "Sets the upper limit for the random compaction delay, specified as a percentage of the head chunk range. 100 means the compaction can be delayed by up to the entire head chunk range. Only effective when the delayed-compaction feature flag is enabled.").
+		Default("10").Hidden().IntVar(&cfg.tsdb.CompactionDelayMaxPercent)
+
 	agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
 		Default("data-agent/").StringVar(&cfg.agentStoragePath)

@@ -516,7 +519,7 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui, otlp-deltatocumulative. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)

@@ -534,7 +537,7 @@ func main() {
 
 	_, err := a.Parse(os.Args[1:])
 	if err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing command line arguments: %w", err))
+		fmt.Fprintf(os.Stderr, "Error parsing command line arguments: %s\n", err)
 		a.Usage(os.Args[1:])
 		os.Exit(2)
 	}

@@ -548,19 +551,10 @@ func main() {
 	notifs.AddNotification(notifications.StartingUp)
 
 	if err := cfg.setFeatureListOptions(logger); err != nil {
-		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err))
+		fmt.Fprintf(os.Stderr, "Error parsing feature list: %s\n", err)
 		os.Exit(1)
 	}
 
-	if cfg.nameEscapingScheme != "" {
-		scheme, err := model.ToEscapingScheme(cfg.nameEscapingScheme)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, `Invalid name escaping scheme: %q; Needs to be one of "values", "underscores", or "dots"`, cfg.nameEscapingScheme)
-			os.Exit(1)
-		}
-		model.NameEscapingScheme = scheme
-	}
-
 	if agentMode && len(serverOnlyFlags) > 0 {
 		fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
 		os.Exit(3)

@@ -595,7 +589,7 @@ func main() {
 
 	// Throw error for invalid config before starting other components.
 	var cfgFile *config.Config
-	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil {
+	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, promslog.NewNopLogger()); err != nil {
 		absPath, pathErr := filepath.Abs(cfg.configFile)
 		if pathErr != nil {
 			absPath = cfg.configFile

@@ -603,12 +597,14 @@ func main() {
 		logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
 		os.Exit(2)
 	}
+	// Get scrape configs to validate dynamically loaded scrape_config_files.
+	// They can change over time, but do the extra validation on startup for better experience.
 	if _, err := cfgFile.GetScrapeConfigs(); err != nil {
 		absPath, pathErr := filepath.Abs(cfg.configFile)
 		if pathErr != nil {
 			absPath = cfg.configFile
 		}
-		logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
+		logger.Error(fmt.Sprintf("Error loading dynamic scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
 		os.Exit(2)
 	}
 	if cfg.tsdb.EnableExemplarStorage {

@@ -667,6 +663,12 @@ func main() {
 
 		cfg.tsdb.MaxBlockDuration = maxBlockDuration
 	}
+
+	// Delayed compaction checks
+	if cfg.tsdb.EnableDelayedCompaction && (cfg.tsdb.CompactionDelayMaxPercent > 100 || cfg.tsdb.CompactionDelayMaxPercent <= 0) {
+		logger.Warn("The --storage.tsdb.delayed-compaction.max-percent should have a value between 1 and 100. Using default", "default", tsdb.DefaultCompactionDelayMaxPercent)
+		cfg.tsdb.CompactionDelayMaxPercent = tsdb.DefaultCompactionDelayMaxPercent
+	}
 	}
 
 	noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
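Note: the startup check above clamps an out-of-range --storage.tsdb.delayed-compaction.max-percent back to the default rather than aborting. A minimal standalone sketch of the same guard, with the constant standing in for tsdb.DefaultCompactionDelayMaxPercent (this is our illustration, not the exact Prometheus code):

package main

import "fmt"

// Stand-in for tsdb.DefaultCompactionDelayMaxPercent.
const defaultCompactionDelayMaxPercent = 10

// clampDelayPercent mirrors the check in main(): anything outside
// the interval (0, 100] falls back to the default.
func clampDelayPercent(p int) int {
	if p > 100 || p <= 0 {
		return defaultCompactionDelayMaxPercent
	}
	return p
}

func main() {
	for _, p := range []int{-5, 0, 10, 100, 150} {
		fmt.Printf("requested %d%% -> effective %d%%\n", p, clampDelayPercent(p))
	}
}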
@@ -761,7 +763,7 @@ func main() {
 		ruleManager *rules.Manager
 	)
 
-	if cfg.enableAutoGOMAXPROCS {
+	if cfg.maxprocsEnable {
 		l := func(format string, a ...interface{}) {
 			logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
 		}

@@ -770,7 +772,7 @@ func main() {
 		}
 	}
 
-	if cfg.enableAutoGOMEMLIMIT {
+	if cfg.memlimitEnable {
 		if _, err := memlimit.SetGoMemLimitWithOpts(
 			memlimit.WithRatio(cfg.memlimitRatio),
 			memlimit.WithProvider(

@@ -990,18 +992,12 @@ func main() {
 	listeners, err := webHandler.Listeners()
 	if err != nil {
 		logger.Error("Unable to start web listener", "err", err)
-		if err := queryEngine.Close(); err != nil {
-			logger.Warn("Closing query engine failed", "err", err)
-		}
 		os.Exit(1)
 	}
 
 	err = toolkit_web.Validate(*webConfig)
 	if err != nil {
 		logger.Error("Unable to validate web configuration file", "err", err)
-		if err := queryEngine.Close(); err != nil {
-			logger.Warn("Closing query engine failed", "err", err)
-		}
 		os.Exit(1)
 	}

@@ -1023,9 +1019,6 @@ func main() {
 			case <-cancel:
 				reloadReady.Close()
 			}
-			if err := queryEngine.Close(); err != nil {
-				logger.Warn("Closing query engine failed", "err", err)
-			}
 			return nil
 		},
 		func(err error) {
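Note: cfg.maxprocsEnable and cfg.memlimitEnable gate the two runtime knobs that this commit promotes from feature flags to regular flags. A minimal sketch of how the two libraries are typically wired up, assuming go.uber.org/automaxprocs and github.com/KimMachineGun/automemlimit (the packages imported here as maxprocs and memlimit); this is a sketch, not the exact Prometheus wiring:

package main

import (
	"log"

	"github.com/KimMachineGun/automemlimit/memlimit"
	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Match GOMAXPROCS to the container CPU quota.
	if _, err := maxprocs.Set(maxprocs.Logger(log.Printf)); err != nil {
		log.Printf("failed to set GOMAXPROCS: %v", err)
	}

	// Set GOMEMLIMIT to 90% of the detected cgroup limit,
	// falling back to total system memory outside a container.
	if _, err := memlimit.SetGoMemLimitWithOpts(
		memlimit.WithRatio(0.9),
		memlimit.WithProvider(
			memlimit.ApplyFallback(memlimit.FromCgroup, memlimit.FromSystem),
		),
	); err != nil {
		log.Printf("failed to set GOMEMLIMIT: %v", err)
	}
}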
@@ -1145,25 +1138,23 @@ func main() {
 			for {
 				select {
 				case <-hup:
-					if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+					if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
 						logger.Error("Error reloading config", "err", err)
 					} else if cfg.enableAutoReload {
-						if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
-							checksum = currentChecksum
-						} else {
+						checksum, err = config.GenerateChecksum(cfg.configFile)
+						if err != nil {
 							logger.Error("Failed to generate checksum during configuration reload", "err", err)
 						}
 					}
 				case rc := <-webHandler.Reload():
-					if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+					if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
 						logger.Error("Error reloading config", "err", err)
 						rc <- err
 					} else {
 						rc <- nil
 						if cfg.enableAutoReload {
-							if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
-								checksum = currentChecksum
-							} else {
+							checksum, err = config.GenerateChecksum(cfg.configFile)
+							if err != nil {
 								logger.Error("Failed to generate checksum during configuration reload", "err", err)
 							}
 						}

@@ -1174,13 +1165,14 @@ func main() {
 					}
 					currentChecksum, err := config.GenerateChecksum(cfg.configFile)
 					if err != nil {
+						checksum = currentChecksum
 						logger.Error("Failed to generate checksum during configuration reload", "err", err)
 					} else if currentChecksum == checksum {
 						continue
 					}
 					logger.Info("Configuration file change detected, reloading the configuration.")
 
-					if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+					if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
 						logger.Error("Error reloading config", "err", err)
 					} else {
 						checksum = currentChecksum

@@ -1210,7 +1202,7 @@ func main() {
 				return nil
 			}
 
-			if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
+			if err := reloadConfig(cfg.configFile, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
 				return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err)
 			}

@@ -1374,10 +1366,12 @@ func main() {
 			},
 		)
 	}
-	if err := g.Run(); err != nil {
-		logger.Error("Error running goroutines from run.Group", "err", err)
-		os.Exit(1)
-	}
+	func() { // This function exists so the top of the stack is named 'main.main.funcxxx' and not 'oklog'.
+		if err := g.Run(); err != nil {
+			logger.Error("Fatal error", "err", err)
+			os.Exit(1)
+		}
+	}()
 	logger.Info("See you next time!")
 }

@@ -1437,7 +1431,7 @@ type reloader struct {
 	reloader func(*config.Config) error
 }
 
-func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
+func reloadConfig(filename string, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
 	start := time.Now()
 	timingsLogger := logger
 	logger.Info("Loading configuration file", "filename", filename)

@@ -1453,7 +1447,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 		}
 	}()
 
-	conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger)
+	conf, err := config.LoadFile(filename, agentMode, logger)
 	if err != nil {
 		return fmt.Errorf("couldn't load configuration (--config.file=%q): %w", filename, err)
 	}

@@ -1643,6 +1637,9 @@ func (s *readyStorage) Appender(ctx context.Context) storage.Appender {
 
 type notReadyAppender struct{}
 
+// SetOptions does nothing in this appender implementation.
+func (n notReadyAppender) SetOptions(opts *storage.AppendOptions) {}
+
 func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
 	return 0, tsdb.ErrNotReady
 }

@@ -1748,7 +1745,7 @@ func (s *readyStorage) WALReplayStatus() (tsdb.WALReplayStatus, error) {
 }
 
 // ErrNotReady is returned if the underlying scrape manager is not ready yet.
-var ErrNotReady = errors.New("Scrape manager not ready")
+var ErrNotReady = errors.New("scrape manager not ready")
 
 // ReadyScrapeManager allows a scrape manager to be retrieved. Even if it's set at a later point in time.
 type readyScrapeManager struct {

@@ -1797,6 +1794,7 @@ type tsdbOptions struct {
 	EnableMemorySnapshotOnShutdown bool
 	EnableNativeHistograms         bool
 	EnableDelayedCompaction        bool
+	CompactionDelayMaxPercent      int
 	EnableOverlappingCompaction    bool
 	EnableOOONativeHistograms      bool
 }

@@ -1821,6 +1819,7 @@ func (opts tsdbOptions) ToTSDBOptions() tsdb.Options {
 		EnableOOONativeHistograms:   opts.EnableOOONativeHistograms,
 		OutOfOrderTimeWindow:        opts.OutOfOrderTimeWindow,
 		EnableDelayedCompaction:     opts.EnableDelayedCompaction,
+		CompactionDelayMaxPercent:   opts.CompactionDelayMaxPercent,
 		EnableOverlappingCompaction: opts.EnableOverlappingCompaction,
 	}
 }
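Note: the auto-reload path above only advances the stored checksum after a successful reload, so a failed reload is retried on the next tick. A standalone sketch of the same idea, using a plain SHA-256 over the config file (a simplification: Prometheus' config.GenerateChecksum also folds in referenced rule and scrape-config files):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"log"
	"os"
	"time"
)

func checksumFile(path string) (string, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(b)
	return hex.EncodeToString(sum[:]), nil
}

// watchAndReload polls the file and calls reload when its checksum changes.
// The ticker is never stopped; fine for a sketch that runs for the process lifetime.
func watchAndReload(path string, interval time.Duration, reload func() error) {
	last, _ := checksumFile(path)
	for range time.Tick(interval) {
		cur, err := checksumFile(path)
		if err != nil || cur == last {
			continue // unreadable or unchanged: nothing to do
		}
		if err := reload(); err != nil {
			log.Printf("reload failed: %v", err)
			continue // keep the old checksum so the reload is retried next tick
		}
		last = cur // only advance after a successful reload
	}
}

func main() {
	watchAndReload("prometheus.yml", time.Second, func() error { return nil })
}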
@@ -125,6 +125,7 @@ func TestFailedStartupExitCode(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
 	fakeInputFile := "fake-input-file"
 	expectedExitStatus := 2

@@ -211,83 +212,125 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
-	for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
-		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
+	for _, tc := range []struct {
+		size     string
+		exitCode int
+	}{
+		{
+			size:     "9MB",
+			exitCode: 1,
+		},
+		{
+			size:     "257MB",
+			exitCode: 1,
+		},
+		{
+			size:     "10",
+			exitCode: 2,
+		},
+		{
+			size:     "1GB",
+			exitCode: 1,
+		},
+		{
+			size:     "12MB",
+			exitCode: 0,
+		},
+	} {
+		t.Run(tc.size, func(t *testing.T) {
+			t.Parallel()
+			prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
 
-		// Log stderr in case of failure.
-		stderr, err := prom.StderrPipe()
-		require.NoError(t, err)
-		go func() {
-			slurp, _ := io.ReadAll(stderr)
-			t.Log(string(slurp))
-		}()
+			// Log stderr in case of failure.
+			stderr, err := prom.StderrPipe()
+			require.NoError(t, err)
+			go func() {
+				slurp, _ := io.ReadAll(stderr)
+				t.Log(string(slurp))
+			}()
 
-		err = prom.Start()
-		require.NoError(t, err)
+			err = prom.Start()
+			require.NoError(t, err)
 
-		if expectedExitStatus == 0 {
-			done := make(chan error, 1)
-			go func() { done <- prom.Wait() }()
-			select {
-			case err := <-done:
-				require.Fail(t, "prometheus should be still running: %v", err)
-			case <-time.After(startupTime):
-				prom.Process.Kill()
-				<-done
-			}
-			continue
-		}
+			if tc.exitCode == 0 {
+				done := make(chan error, 1)
+				go func() { done <- prom.Wait() }()
+				select {
+				case err := <-done:
+					require.Fail(t, "prometheus should be still running: %v", err)
+				case <-time.After(startupTime):
+					prom.Process.Kill()
+					<-done
+				}
+				return
+			}
 
-		err = prom.Wait()
-		require.Error(t, err)
-		var exitError *exec.ExitError
-		require.ErrorAs(t, err, &exitError)
-		status := exitError.Sys().(syscall.WaitStatus)
-		require.Equal(t, expectedExitStatus, status.ExitStatus())
+			err = prom.Wait()
+			require.Error(t, err)
+			var exitError *exec.ExitError
+			require.ErrorAs(t, err, &exitError)
+			status := exitError.Sys().(syscall.WaitStatus)
+			require.Equal(t, tc.exitCode, status.ExitStatus())
+		})
 	}
 }
 
 func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
-	t.Parallel()
-
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
-	for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
-		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
+	for _, tc := range []struct {
+		size     string
+		exitCode int
+	}{
+		{
+			size:     "512KB",
+			exitCode: 1,
+		},
+		{
+			size:     "1MB",
+			exitCode: 0,
+		},
+	} {
+		t.Run(tc.size, func(t *testing.T) {
+			t.Parallel()
+			prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+tc.size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
 
-		// Log stderr in case of failure.
-		stderr, err := prom.StderrPipe()
-		require.NoError(t, err)
-		go func() {
-			slurp, _ := io.ReadAll(stderr)
-			t.Log(string(slurp))
-		}()
+			// Log stderr in case of failure.
+			stderr, err := prom.StderrPipe()
+			require.NoError(t, err)
+			go func() {
+				slurp, _ := io.ReadAll(stderr)
+				t.Log(string(slurp))
+			}()
 
-		err = prom.Start()
-		require.NoError(t, err)
+			err = prom.Start()
+			require.NoError(t, err)
 
-		if expectedExitStatus == 0 {
-			done := make(chan error, 1)
-			go func() { done <- prom.Wait() }()
-			select {
-			case err := <-done:
-				require.Fail(t, "prometheus should be still running: %v", err)
-			case <-time.After(startupTime):
-				prom.Process.Kill()
-				<-done
-			}
-			continue
-		}
+			if tc.exitCode == 0 {
+				done := make(chan error, 1)
+				go func() { done <- prom.Wait() }()
+				select {
+				case err := <-done:
+					require.Fail(t, "prometheus should be still running: %v", err)
+				case <-time.After(startupTime):
+					prom.Process.Kill()
+					<-done
+				}
+				return
+			}
 
-		err = prom.Wait()
-		require.Error(t, err)
-		var exitError *exec.ExitError
-		require.ErrorAs(t, err, &exitError)
-		status := exitError.Sys().(syscall.WaitStatus)
-		require.Equal(t, expectedExitStatus, status.ExitStatus())
+			err = prom.Wait()
+			require.Error(t, err)
+			var exitError *exec.ExitError
+			require.ErrorAs(t, err, &exitError)
+			status := exitError.Sys().(syscall.WaitStatus)
+			require.Equal(t, tc.exitCode, status.ExitStatus())
+		})
 	}
 }

@@ -353,6 +396,8 @@ func getCurrentGaugeValuesFor(t *testing.T, reg prometheus.Gatherer, metricNames
 }
 
 func TestAgentSuccessfulStartup(t *testing.T) {
+	t.Parallel()
+
 	prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+agentConfig)
 	require.NoError(t, prom.Start())

@@ -371,6 +416,8 @@ func TestAgentSuccessfulStartup(t *testing.T) {
 }
 
 func TestAgentFailedStartupWithServerFlag(t *testing.T) {
+	t.Parallel()
+
 	prom := exec.Command(promPath, "-test.main", "--agent", "--storage.tsdb.path=.", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 
 	output := bytes.Buffer{}

@@ -398,6 +445,8 @@ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
 }
 
 func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
+	t.Parallel()
+
 	prom := exec.Command(promPath, "-test.main", "--agent", "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
 	require.NoError(t, prom.Start())

@@ -419,6 +468,7 @@ func TestModeSpecificFlags(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
 	testcases := []struct {
 		mode string

@@ -433,6 +483,7 @@ func TestModeSpecificFlags(t *testing.T) {
 
 	for _, tc := range testcases {
 		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
+			t.Parallel()
 			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
 
 			if tc.mode == "agent" {

@@ -484,6 +535,8 @@ func TestDocumentation(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		t.SkipNow()
 	}
+	t.Parallel()
+
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()

@@ -508,6 +561,8 @@ func TestDocumentation(t *testing.T) {
 }
 
 func TestRwProtoMsgFlagParser(t *testing.T) {
+	t.Parallel()
+
 	defaultOpts := config.RemoteWriteProtoMsgs{
 		config.RemoteWriteProtoMsgV1, config.RemoteWriteProtoMsgV2,
 	}
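Note: the pattern applied throughout these tests replaces a map iterated for its side effects with a slice of named cases run as parallel subtests, and `continue` becomes `return` because the loop body now lives inside a closure. A minimal generic sketch of the shape (with Go 1.22+ loop-variable semantics, no per-iteration copy of tc is needed):

package main

import "testing"

func TestSizes(t *testing.T) {
	t.Parallel()
	for _, tc := range []struct {
		size     string
		exitCode int
	}{
		{size: "9MB", exitCode: 1},
		{size: "12MB", exitCode: 0},
	} {
		t.Run(tc.size, func(t *testing.T) {
			t.Parallel() // cases run concurrently, each with its own *testing.T
			if tc.exitCode == 0 {
				return // inside the closure, 'return' replaces the old 'continue'
			}
			// ...assert on tc.exitCode here...
		})
	}
}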
@@ -34,6 +34,7 @@ func TestStartupInterrupt(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
 	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))

@@ -456,6 +456,7 @@ func TestQueryLog(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
 	cwd, err := os.Getwd()
 	require.NoError(t, err)

@@ -474,6 +475,7 @@ func TestQueryLog(t *testing.T) {
 	}
 
 	t.Run(p.String(), func(t *testing.T) {
+		t.Parallel()
 		p.run(t)
 	})
 }
229 cmd/prometheus/reload_test.go (new file, all lines added)
@@ -0,0 +1,229 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"bufio"
	"encoding/json"
	"io"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/prometheus/prometheus/util/testutil"
)

const configReloadMetric = "prometheus_config_last_reload_successful"

func TestAutoReloadConfig_ValidToValid(t *testing.T) {
	steps := []struct {
		configText       string
		expectedInterval string
		expectedMetric   float64
	}{
		{
			configText: `
global:
  scrape_interval: 30s
`,
			expectedInterval: "30s",
			expectedMetric:   1,
		},
		{
			configText: `
global:
  scrape_interval: 15s
`,
			expectedInterval: "15s",
			expectedMetric:   1,
		},
		{
			configText: `
global:
  scrape_interval: 30s
`,
			expectedInterval: "30s",
			expectedMetric:   1,
		},
	}

	runTestSteps(t, steps)
}

func TestAutoReloadConfig_ValidToInvalidToValid(t *testing.T) {
	steps := []struct {
		configText       string
		expectedInterval string
		expectedMetric   float64
	}{
		{
			configText: `
global:
  scrape_interval: 30s
`,
			expectedInterval: "30s",
			expectedMetric:   1,
		},
		{
			configText: `
global:
  scrape_interval: 15s
invalid_syntax
`,
			expectedInterval: "30s",
			expectedMetric:   0,
		},
		{
			configText: `
global:
  scrape_interval: 30s
`,
			expectedInterval: "30s",
			expectedMetric:   1,
		},
	}

	runTestSteps(t, steps)
}

func runTestSteps(t *testing.T, steps []struct {
	configText       string
	expectedInterval string
	expectedMetric   float64
},
) {
	configDir := t.TempDir()
	configFilePath := filepath.Join(configDir, "prometheus.yml")

	t.Logf("Config file path: %s", configFilePath)

	require.NoError(t, os.WriteFile(configFilePath, []byte(steps[0].configText), 0o644), "Failed to write initial config file")

	port := testutil.RandomUnprivilegedPort(t)
	runPrometheusWithLogging(t, configFilePath, port)

	baseURL := "http://localhost:" + strconv.Itoa(port)
	require.Eventually(t, func() bool {
		resp, err := http.Get(baseURL + "/-/ready")
		if err != nil {
			return false
		}
		defer resp.Body.Close()
		return resp.StatusCode == http.StatusOK
	}, 5*time.Second, 100*time.Millisecond, "Prometheus didn't become ready in time")

	for i, step := range steps {
		t.Logf("Step %d", i)
		require.NoError(t, os.WriteFile(configFilePath, []byte(step.configText), 0o644), "Failed to write config file for step")

		require.Eventually(t, func() bool {
			return verifyScrapeInterval(t, baseURL, step.expectedInterval) &&
				verifyConfigReloadMetric(t, baseURL, step.expectedMetric)
		}, 10*time.Second, 500*time.Millisecond, "Prometheus config reload didn't happen in time")
	}
}

func verifyScrapeInterval(t *testing.T, baseURL, expectedInterval string) bool {
	resp, err := http.Get(baseURL + "/api/v1/status/config")
	require.NoError(t, err)
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	require.NoError(t, err)

	config := struct {
		Data struct {
			YAML string `json:"yaml"`
		} `json:"data"`
	}{}

	require.NoError(t, json.Unmarshal(body, &config))
	return strings.Contains(config.Data.YAML, "scrape_interval: "+expectedInterval)
}

func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float64) bool {
	resp, err := http.Get(baseURL + "/metrics")
	require.NoError(t, err)
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	require.NoError(t, err)

	lines := string(body)
	var actualValue float64
	found := false

	for _, line := range strings.Split(lines, "\n") {
		if strings.HasPrefix(line, configReloadMetric) {
			parts := strings.Fields(line)
			if len(parts) >= 2 {
				actualValue, err = strconv.ParseFloat(parts[1], 64)
				require.NoError(t, err)
				found = true
				break
			}
		}
	}

	return found && actualValue == expectedValue
}

func captureLogsToTLog(t *testing.T, r io.Reader) {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		t.Log(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		t.Logf("Error reading logs: %v", err)
	}
}

func runPrometheusWithLogging(t *testing.T, configFilePath string, port int) {
	stdoutPipe, stdoutWriter := io.Pipe()
	stderrPipe, stderrWriter := io.Pipe()

	var wg sync.WaitGroup
	wg.Add(2)

	prom := exec.Command(promPath, "-test.main", "--enable-feature=auto-reload-config", "--config.file="+configFilePath, "--config.auto-reload-interval=1s", "--web.listen-address=0.0.0.0:"+strconv.Itoa(port))
	prom.Stdout = stdoutWriter
	prom.Stderr = stderrWriter

	go func() {
		defer wg.Done()
		captureLogsToTLog(t, stdoutPipe)
	}()
	go func() {
		defer wg.Done()
		captureLogsToTLog(t, stderrPipe)
	}()

	t.Cleanup(func() {
		prom.Process.Kill()
		prom.Wait()
		stdoutWriter.Close()
		stderrWriter.Close()
		wg.Wait()
	})

	require.NoError(t, prom.Start())
}
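Note: verifyConfigReloadMetric above scans the /metrics text exposition by hand. An equivalent sketch using the expfmt parser from prometheus/common, which this repo already depends on (the helper name and error handling are ours):

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/common/expfmt"
)

// gaugeValue fetches a metrics endpoint and returns the value of the first
// sample of the named gauge metric family.
func gaugeValue(url, name string) (float64, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(resp.Body)
	if err != nil {
		return 0, err
	}
	mf, ok := families[name]
	if !ok || len(mf.GetMetric()) == 0 {
		return 0, fmt.Errorf("metric %q not found", name)
	}
	return mf.GetMetric()[0].GetGauge().GetValue(), nil
}

func main() {
	v, err := gaugeValue("http://localhost:9090/metrics", "prometheus_config_last_reload_successful")
	fmt.Println(v, err)
}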
@@ -34,8 +34,8 @@ import (
 )
 
 var (
-	errNotNativeHistogram = fmt.Errorf("not a native histogram")
-	errNotEnoughData      = fmt.Errorf("not enough data")
+	errNotNativeHistogram = errors.New("not a native histogram")
+	errNotEnoughData      = errors.New("not enough data")
 
 	outputHeader = `Bucket stats for each histogram series over time
 ------------------------------------------------

@@ -169,7 +169,7 @@ func querySamples(ctx context.Context, api v1.API, query string, end time.Time)
 
 	matrix, ok := values.(model.Matrix)
 	if !ok {
-		return nil, fmt.Errorf("query of buckets resulted in non-Matrix")
+		return nil, errors.New("query of buckets resulted in non-Matrix")
 	}
 
 	return matrix, nil

@@ -259,7 +259,7 @@ func getBucketCountsAtTime(matrix model.Matrix, numBuckets, timeIdx int) ([]int,
 		prev := matrix[i].Values[timeIdx]
 		// Assume the results are nicely aligned.
 		if curr.Timestamp != prev.Timestamp {
-			return counts, fmt.Errorf("matrix result is not time aligned")
+			return counts, errors.New("matrix result is not time aligned")
 		}
 		counts[i+1] = int(curr.Value - prev.Value)
 	}

@@ -109,6 +109,7 @@ func init() {
 }
 
 func TestGetBucketCountsAtTime(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		matrix model.Matrix
 		length int

@@ -137,6 +138,7 @@ func TestGetBucketCountsAtTime(t *testing.T) {
 
 	for _, c := range cases {
 		t.Run(fmt.Sprintf("exampleMatrix@%d", c.timeIdx), func(t *testing.T) {
+			t.Parallel()
 			res, err := getBucketCountsAtTime(c.matrix, c.length, c.timeIdx)
 			require.NoError(t, err)
 			require.Equal(t, c.expected, res)

@@ -145,6 +147,7 @@ func TestGetBucketCountsAtTime(t *testing.T) {
 }
 
 func TestCalcClassicBucketStatistics(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		matrix   model.Matrix
 		expected *statistics

@@ -162,6 +165,7 @@ func TestCalcClassicBucketStatistics(t *testing.T) {
 
 	for i, c := range cases {
 		t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
+			t.Parallel()
 			res, err := calcClassicBucketStatistics(c.matrix)
 			require.NoError(t, err)
 			require.Equal(t, c.expected, res)

@@ -49,7 +49,7 @@ func getMinAndMaxTimestamps(p textparse.Parser) (int64, int64, error) {
 
 		_, ts, _ := p.Series()
 		if ts == nil {
-			return 0, 0, fmt.Errorf("expected timestamp for series got none")
+			return 0, 0, errors.New("expected timestamp for series got none")
 		}
 
 		if *ts > maxt {

@@ -86,6 +86,7 @@ func testBlocks(t *testing.T, db *tsdb.DB, expectedMinTime, expectedMaxTime, exp
 }
 
 func TestBackfill(t *testing.T) {
+	t.Parallel()
 	tests := []struct {
 		ToParse string
 		IsOk    bool

@@ -729,6 +730,7 @@ after_eof 1 2
 	}
 	for _, test := range tests {
 		t.Run(test.Description, func(t *testing.T) {
+			t.Parallel()
 			t.Logf("Test:%s", test.Description)
 
 			outputDir := t.TempDir()
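Note: the fmt.Errorf-to-errors.New swaps above follow standard Go practice: with no format verbs, fmt.Errorf only adds a needless formatting pass, and linters such as perfsprint flag it (naming the linter is our assumption; the diff does not say which one prompted the change). Both forms below produce equivalent errors:

package main

import (
	"errors"
	"fmt"
)

var (
	errPreferred = errors.New("not enough data") // no formatting machinery involved
	errWasteful  = fmt.Errorf("not enough data") // same message, pointless Errorf
)

func main() {
	fmt.Println(errPreferred, errWasteful)
}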
@@ -36,7 +36,7 @@ import (
 	"github.com/prometheus/client_golang/api"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
-	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/expfmt"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"

@@ -45,7 +45,6 @@ import (
 
 	dto "github.com/prometheus/client_model/go"
 	promconfig "github.com/prometheus/common/config"
-	"github.com/prometheus/common/expfmt"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"

@@ -58,6 +57,7 @@ import (
 	_ "github.com/prometheus/prometheus/plugins" // Register plugins.
 	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/promql/promqltest"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/util/documentcli"
 )

@@ -73,14 +73,19 @@ const (
 	// Exit code 3 is used for "one or more lint issues detected".
 	lintErrExitCode = 3
 
-	lintOptionAll            = "all"
-	lintOptionDuplicateRules = "duplicate-rules"
-	lintOptionNone           = "none"
-	checkHealth              = "/-/healthy"
-	checkReadiness           = "/-/ready"
+	lintOptionAll                   = "all"
+	lintOptionDuplicateRules        = "duplicate-rules"
+	lintOptionTooLongScrapeInterval = "too-long-scrape-interval"
+	lintOptionNone                  = "none"
+	checkHealth                     = "/-/healthy"
+	checkReadiness                  = "/-/ready"
 )
 
-var lintOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
+var (
+	lintRulesOptions = []string{lintOptionAll, lintOptionDuplicateRules, lintOptionNone}
+	// Same as lintRulesOptions, but including scrape config linting options as well.
+	lintConfigOptions = append(append([]string{}, lintRulesOptions...), lintOptionTooLongScrapeInterval)
+)
 
 func main() {
 	var (

@@ -97,6 +102,10 @@ func main() {
 	app.HelpFlag.Short('h')
 
 	checkCmd := app.Command("check", "Check the resources for validity.")
+	checkLookbackDelta := checkCmd.Flag(
+		"query.lookback-delta",
+		"The server's maximum query lookback duration.",
+	).Default("5m").Duration()
 
 	experimental := app.Flag("experimental", "Enable experimental commands.").Bool()

@@ -113,11 +122,12 @@ func main() {
 	checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool()
 	checkConfigLint := checkConfigCmd.Flag(
 		"lint",
-		"Linting checks to apply to the rules specified in the config. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
+		"Linting checks to apply to the rules/scrape configs specified in the config. Available options are: "+strings.Join(lintConfigOptions, ", ")+". Use --lint=none to disable linting",
 	).Default(lintOptionDuplicateRules).String()
 	checkConfigLintFatal := checkConfigCmd.Flag(
 		"lint-fatal",
 		"Make lint errors exit with exit code 3.").Default("false").Bool()
+	checkConfigIgnoreUnknownFields := checkConfigCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule groups read by the config files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
 
 	checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.")
 	webConfigFiles := checkWebConfigCmd.Arg(

@@ -140,11 +150,12 @@ func main() {
 	).ExistingFiles()
 	checkRulesLint := checkRulesCmd.Flag(
 		"lint",
-		"Linting checks to apply. Available options are: "+strings.Join(lintOptions, ", ")+". Use --lint=none to disable linting",
+		"Linting checks to apply. Available options are: "+strings.Join(lintRulesOptions, ", ")+". Use --lint=none to disable linting",
 	).Default(lintOptionDuplicateRules).String()
 	checkRulesLintFatal := checkRulesCmd.Flag(
 		"lint-fatal",
 		"Make lint errors exit with exit code 3.").Default("false").Bool()
+	checkRulesIgnoreUnknownFields := checkRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the rule files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
 
 	checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
 	checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()

@@ -216,7 +227,9 @@ func main() {
 		"test-rule-file",
 		"The unit test file.",
 	).Required().ExistingFiles()
+	testRulesDebug := testRulesCmd.Flag("debug", "Enable unit test debugging.").Default("false").Bool()
 	testRulesDiff := testRulesCmd.Flag("diff", "[Experimental] Print colored differential output between expected & received output.").Default("false").Bool()
+	testRulesIgnoreUnknownFields := testRulesCmd.Flag("ignore-unknown-fields", "Ignore unknown fields in the test files. This is useful when you want to extend rule files with custom metadata. Ensure that those fields are removed before loading them into the Prometheus server as it performs strict checks by default.").Default("false").Bool()
 
 	defaultDBPath := "data/"
 	tsdbCmd := app.Command("tsdb", "Run tsdb commands.")

@@ -310,12 +323,12 @@ func main() {
 		kingpin.Fatalf("Cannot set base auth in the server URL and use a http.config.file at the same time")
 	}
 	var err error
-	httpConfig, _, err := config_util.LoadHTTPConfigFile(httpConfigFilePath)
+	httpConfig, _, err := promconfig.LoadHTTPConfigFile(httpConfigFilePath)
 	if err != nil {
 		kingpin.Fatalf("Failed to load HTTP config file: %v", err)
 	}
 
-	httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", config_util.WithUserAgent("promtool/"+version.Version))
+	httpRoundTripper, err = promconfig.NewRoundTripperFromConfig(*httpConfig, "promtool", promconfig.WithUserAgent(version.ComponentUserAgent("promtool")))
 	if err != nil {
 		kingpin.Fatalf("Failed to create a new HTTP round tripper: %v", err)
 	}

@@ -338,7 +351,7 @@ func main() {
 		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
 
 	case checkConfigCmd.FullCommand():
-		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
+		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newConfigLintConfig(*checkConfigLint, *checkConfigLintFatal, *checkConfigIgnoreUnknownFields, model.Duration(*checkLookbackDelta)), *configFiles...))
 
 	case checkServerHealthCmd.FullCommand():
 		os.Exit(checkErr(CheckServerStatus(serverURL, checkHealth, httpRoundTripper)))

@@ -350,7 +363,7 @@ func main() {
 		os.Exit(CheckWebConfig(*webConfigFiles...))
 
 	case checkRulesCmd.FullCommand():
-		os.Exit(CheckRules(newLintConfig(*checkRulesLint, *checkRulesLintFatal), *ruleFiles...))
+		os.Exit(CheckRules(newRulesLintConfig(*checkRulesLint, *checkRulesLintFatal, *checkRulesIgnoreUnknownFields), *ruleFiles...))
 
 	case checkMetricsCmd.FullCommand():
 		os.Exit(CheckMetrics(*checkMetricsExtended))

@@ -391,6 +404,8 @@ func main() {
 			},
 			*testRulesRun,
 			*testRulesDiff,
+			*testRulesDebug,
+			*testRulesIgnoreUnknownFields,
 			*testRulesFiles...),
 		)

@@ -441,18 +456,20 @@ func checkExperimental(f bool) {
 	}
 }
 
-var errLint = fmt.Errorf("lint error")
+var errLint = errors.New("lint error")
 
-type lintConfig struct {
-	all            bool
-	duplicateRules bool
-	fatal          bool
+type rulesLintConfig struct {
+	all                 bool
+	duplicateRules      bool
+	fatal               bool
+	ignoreUnknownFields bool
 }
 
-func newLintConfig(stringVal string, fatal bool) lintConfig {
+func newRulesLintConfig(stringVal string, fatal, ignoreUnknownFields bool) rulesLintConfig {
 	items := strings.Split(stringVal, ",")
-	ls := lintConfig{
-		fatal: fatal,
+	ls := rulesLintConfig{
+		fatal:               fatal,
+		ignoreUnknownFields: ignoreUnknownFields,
 	}
 	for _, setting := range items {
 		switch setting {

@@ -462,16 +479,57 @@ func newLintConfig(stringVal string, fatal bool) lintConfig {
 			ls.duplicateRules = true
 		case lintOptionNone:
 		default:
-			fmt.Printf("WARNING: unknown lint option %s\n", setting)
+			fmt.Printf("WARNING: unknown lint option: %q\n", setting)
 		}
 	}
 	return ls
 }
 
-func (ls lintConfig) lintDuplicateRules() bool {
+func (ls rulesLintConfig) lintDuplicateRules() bool {
 	return ls.all || ls.duplicateRules
 }
 
+type configLintConfig struct {
+	rulesLintConfig
+
+	lookbackDelta model.Duration
+}
+
+func newConfigLintConfig(optionsStr string, fatal, ignoreUnknownFields bool, lookbackDelta model.Duration) configLintConfig {
+	c := configLintConfig{
+		rulesLintConfig: rulesLintConfig{
+			fatal: fatal,
+		},
+	}
+
+	lintNone := false
+	var rulesOptions []string
+	for _, option := range strings.Split(optionsStr, ",") {
+		switch option {
+		case lintOptionAll, lintOptionTooLongScrapeInterval:
+			c.lookbackDelta = lookbackDelta
+			if option == lintOptionAll {
+				rulesOptions = append(rulesOptions, lintOptionAll)
+			}
+		case lintOptionNone:
+			lintNone = true
+		default:
+			rulesOptions = append(rulesOptions, option)
+		}
+	}
+
+	if lintNone {
+		c.lookbackDelta = 0
+		rulesOptions = nil
+	}
+
+	if len(rulesOptions) > 0 {
+		c.rulesLintConfig = newRulesLintConfig(strings.Join(rulesOptions, ","), fatal, ignoreUnknownFields)
+	}
+
+	return c
+}
+
 // CheckServerStatus - healthy & ready.
 func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper http.RoundTripper) error {
 	if serverURL.Scheme == "" {
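Note: combined with the new check-level --query.lookback-delta flag above, the scrape-interval lint can be invoked as, for example, promtool check config --lint=too-long-scrape-interval --query.lookback-delta=5m prometheus.yml. Per the parsing code above, --lint=all also enables it, and --lint=none disables everything regardless of what else it is combined with.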
@@ -510,12 +568,12 @@ func CheckServerStatus(serverURL *url.URL, checkEndpoint string, roundTripper ht
 }
 
 // CheckConfig validates configuration files.
-func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files ...string) int {
+func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings configLintConfig, files ...string) int {
 	failed := false
 	hasErrors := false
 
 	for _, f := range files {
-		ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly)
+		ruleFiles, scrapeConfigs, err := checkConfig(agentMode, f, checkSyntaxOnly)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:", err)
 			hasErrors = true

@@ -528,12 +586,12 @@ func CheckConfig(agentMode, checkSyntaxOnly bool, lintSettings lintConfig, files
 		}
 		fmt.Println()
 
-		rulesFailed, rulesHasErrors := checkRules(ruleFiles, lintSettings)
-		if rulesFailed {
-			failed = rulesFailed
-		}
-		if rulesHasErrors {
-			hasErrors = rulesHasErrors
+		if !checkSyntaxOnly {
+			scrapeConfigsFailed := lintScrapeConfigs(scrapeConfigs, lintSettings)
+			failed = failed || scrapeConfigsFailed
+			rulesFailed, rulesHaveErrors := checkRules(ruleFiles, lintSettings.rulesLintConfig)
+			failed = failed || rulesFailed
+			hasErrors = hasErrors || rulesHaveErrors
 		}
 	}
 	if failed && hasErrors {

@@ -572,12 +630,12 @@ func checkFileExists(fn string) error {
 	return err
 }
 
-func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
+func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, []*config.ScrapeConfig, error) {
 	fmt.Println("Checking", filename)
 
-	cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger())
+	cfg, err := config.LoadFile(filename, agentMode, promslog.NewNopLogger())
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	var ruleFiles []string

@@ -585,15 +643,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 	for _, rf := range cfg.RuleFiles {
 		rfs, err := filepath.Glob(rf)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		// If an explicit file was given, error if it is not accessible.
 		if !strings.Contains(rf, "*") {
 			if len(rfs) == 0 {
-				return nil, fmt.Errorf("%q does not point to an existing file", rf)
+				return nil, nil, fmt.Errorf("%q does not point to an existing file", rf)
 			}
 			if err := checkFileExists(rfs[0]); err != nil {
-				return nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err)
+				return nil, nil, fmt.Errorf("error checking rule file %q: %w", rfs[0], err)
 			}
 		}
 		ruleFiles = append(ruleFiles, rfs...)

@@ -607,26 +665,26 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 		var err error
 		scfgs, err = cfg.GetScrapeConfigs()
 		if err != nil {
-			return nil, fmt.Errorf("error loading scrape configs: %w", err)
+			return nil, nil, fmt.Errorf("error loading scrape configs: %w", err)
 		}
 	}
 
 	for _, scfg := range scfgs {
 		if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil {
 			if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil {
-				return nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)
+				return nil, nil, fmt.Errorf("error checking authorization credentials or bearer token file %q: %w", scfg.HTTPClientConfig.Authorization.CredentialsFile, err)
 			}
 		}
 
 		if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 
 		for _, c := range scfg.ServiceDiscoveryConfigs {
 			switch c := c.(type) {
 			case *kubernetes.SDConfig:
 				if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
-					return nil, err
+					return nil, nil, err
 				}
 			case *file.SDConfig:
 				if checkSyntaxOnly {

@@ -635,17 +693,17 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 				for _, file := range c.Files {
 					files, err := filepath.Glob(file)
 					if err != nil {
-						return nil, err
+						return nil, nil, err
 					}
 					if len(files) != 0 {
 						for _, f := range files {
 							var targetGroups []*targetgroup.Group
 							targetGroups, err = checkSDFile(f)
 							if err != nil {
-								return nil, fmt.Errorf("checking SD file %q: %w", file, err)
+								return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err)
 							}
 							if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil {
-								return nil, err
+								return nil, nil, err
 							}
 						}
 						continue

@@ -654,7 +712,7 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 				}
 			case discovery.StaticConfig:
 				if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil {
-					return nil, err
+					return nil, nil, err
 				}
 			}
 		}

@@ -671,18 +729,18 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 			for _, file := range c.Files {
 				files, err := filepath.Glob(file)
 				if err != nil {
-					return nil, err
+					return nil, nil, err
 				}
 				if len(files) != 0 {
 					for _, f := range files {
 						var targetGroups []*targetgroup.Group
 						targetGroups, err = checkSDFile(f)
 						if err != nil {
-							return nil, fmt.Errorf("checking SD file %q: %w", file, err)
+							return nil, nil, fmt.Errorf("checking SD file %q: %w", file, err)
 						}
 
 						if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil {
-							return nil, err
+							return nil, nil, err
 						}
 					}
 					continue

@@ -691,15 +749,15 @@ func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]strin
 				}
 			case discovery.StaticConfig:
 				if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil {
-					return nil, err
+					return nil, nil, err
 				}
 			}
 		}
 	}
-	return ruleFiles, nil
+	return ruleFiles, scfgs, nil
 }
 
-func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error {
+func checkTLSConfig(tlsConfig promconfig.TLSConfig, checkSyntaxOnly bool) error {
 	if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 {
 		return fmt.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile)
 	}

@@ -758,7 +816,7 @@ func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 }
 
 // CheckRules validates rule files.
-func CheckRules(ls lintConfig, files ...string) int {
+func CheckRules(ls rulesLintConfig, files ...string) int {
 	failed := false
 	hasErrors := false
 	if len(files) == 0 {

@@ -778,7 +836,7 @@ func CheckRules(ls lintConfig, files ...string) int {
 }
 
 // checkRulesFromStdin validates rule from stdin.
-func checkRulesFromStdin(ls lintConfig) (bool, bool) {
+func checkRulesFromStdin(ls rulesLintConfig) (bool, bool) {
 	failed := false
 	hasErrors := false
 	fmt.Println("Checking standard input")

@@ -787,7 +845,7 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) {
 		fmt.Fprintln(os.Stderr, "  FAILED:", err)
 		return true, true
 	}
-	rgs, errs := rulefmt.Parse(data)
+	rgs, errs := rulefmt.Parse(data, ls.ignoreUnknownFields)
 	if errs != nil {
 		failed = true
 		fmt.Fprintln(os.Stderr, "  FAILED:")

@@ -816,12 +874,12 @@ func checkRulesFromStdin(ls lintConfig) (bool, bool) {
 }
 
 // checkRules validates rule files.
-func checkRules(files []string, ls lintConfig) (bool, bool) {
+func checkRules(files []string, ls rulesLintConfig) (bool, bool) {
 	failed := false
 	hasErrors := false
 	for _, f := range files {
 		fmt.Println("Checking", f)
-		rgs, errs := rulefmt.ParseFile(f)
+		rgs, errs := rulefmt.ParseFile(f, ls.ignoreUnknownFields)
 		if errs != nil {
 			failed = true
 			fmt.Fprintln(os.Stderr, "  FAILED:")

@@ -850,7 +908,7 @@ func checkRules(files []string, ls lintConfig) (bool, bool) {
 	return failed, hasErrors
 }
 
-func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []error) {
+func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings rulesLintConfig) (int, []error) {
 	numRules := 0
 	for _, rg := range rgs.Groups {
 		numRules += len(rg.Rules)

@@ -874,6 +932,16 @@ func checkRuleGroups(rgs *rulefmt.RuleGroups, lintSettings lintConfig) (int, []e
 	return numRules, nil
 }
 
+func lintScrapeConfigs(scrapeConfigs []*config.ScrapeConfig, lintSettings configLintConfig) bool {
+	for _, scfg := range scrapeConfigs {
+		if lintSettings.lookbackDelta > 0 && scfg.ScrapeInterval >= lintSettings.lookbackDelta {
+			fmt.Fprintf(os.Stderr, "  FAILED: too long scrape interval found, data point will be marked as stale - job: %s, interval: %s\n", scfg.JobName, scfg.ScrapeInterval)
+			return true
+		}
+	}
+	return false
+}
+
 type compareRuleType struct {
 	metric string
 	label  labels.Labels

@@ -895,30 +963,30 @@ func compare(a, b compareRuleType) int {
 
 func checkDuplicates(groups []rulefmt.RuleGroup) []compareRuleType {
 	var duplicates []compareRuleType
-	var rules compareRuleTypes
+	var cRules compareRuleTypes
 
 	for _, group := range groups {
 		for _, rule := range group.Rules {
-			rules = append(rules, compareRuleType{
+			cRules = append(cRules, compareRuleType{
 				metric: ruleMetric(rule),
-				label:  labels.FromMap(rule.Labels),
+				label:  rules.FromMaps(group.Labels, rule.Labels),
 			})
 		}
 	}
-	if len(rules) < 2 {
+	if len(cRules) < 2 {
 		return duplicates
 	}
-	sort.Sort(rules)
+	sort.Sort(cRules)
 
-	last := rules[0]
-	for i := 1; i < len(rules); i++ {
-		if compare(last, rules[i]) == 0 {
+	last := cRules[0]
+	for i := 1; i < len(cRules); i++ {
+		if compare(last, cRules[i]) == 0 {
 			// Don't add a duplicated rule multiple times.
 			if len(duplicates) == 0 || compare(last, duplicates[len(duplicates)-1]) != 0 {
-				duplicates = append(duplicates, rules[i])
+				duplicates = append(duplicates, cRules[i])
 			}
 		}
-		last = rules[i]
+		last = cRules[i]
 	}
 
 	return duplicates
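Note: the rationale behind lintScrapeConfigs above: PromQL instant selectors only look back query.lookback-delta (5m by default) for the most recent sample, so any scrape interval at or beyond that window guarantees stretches where the series reads as stale. A standalone sketch of the same comparison the lint performs:

package main

import (
	"fmt"
	"time"
)

func main() {
	lookbackDelta := 5 * time.Minute // default --query.lookback-delta
	for _, interval := range []time.Duration{30 * time.Second, 10 * time.Minute} {
		if interval >= lookbackDelta {
			fmt.Printf("interval %s: FAILED, data points will be marked stale between scrapes\n", interval)
		} else {
			fmt.Printf("interval %s: ok\n", interval)
		}
	}
}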
@@ -60,6 +60,7 @@ func TestMain(m *testing.M) {
 }
 
 func TestQueryRange(t *testing.T) {
+	t.Parallel()
 	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "matrix", "result": []}}`)
 	defer s.Close()

@@ -83,6 +84,7 @@ func TestQueryRange(t *testing.T) {
 }
 
 func TestQueryInstant(t *testing.T) {
+	t.Parallel()
 	s, getRequest := mockServer(200, `{"status": "success", "data": {"resultType": "vector", "result": []}}`)
 	defer s.Close()

@@ -114,6 +116,7 @@ func mockServer(code int, body string) (*httptest.Server, func() *http.Request)
 }
 
 func TestCheckSDFile(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -144,6 +147,7 @@ func TestCheckSDFile(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
 			_, err := checkSDFile(test.file)
 			if test.err != "" {
 				require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())

@@ -155,6 +159,7 @@ func TestCheckSDFile(t *testing.T) {
 }
 
 func TestCheckDuplicates(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name     string
 		ruleFile string

@@ -179,7 +184,8 @@ func TestCheckDuplicates(t *testing.T) {
 	for _, test := range cases {
 		c := test
 		t.Run(c.name, func(t *testing.T) {
-			rgs, err := rulefmt.ParseFile(c.ruleFile)
+			t.Parallel()
+			rgs, err := rulefmt.ParseFile(c.ruleFile, false)
 			require.Empty(t, err)
 			dups := checkDuplicates(rgs.Groups)
 			require.Equal(t, c.expectedDups, dups)

@@ -188,7 +194,7 @@ func TestCheckDuplicates(t *testing.T) {
 }
 
 func BenchmarkCheckDuplicates(b *testing.B) {
-	rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml")
+	rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false)
 	require.Empty(b, err)
 	b.ResetTimer()

@@ -198,6 +204,7 @@ func BenchmarkCheckDuplicates(b *testing.B) {
 }
 
 func TestCheckTargetConfig(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -226,7 +233,8 @@ func TestCheckTargetConfig(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
-			_, err := checkConfig(false, "testdata/"+test.file, false)
+			t.Parallel()
+			_, _, err := checkConfig(false, "testdata/"+test.file, false)
 			if test.err != "" {
 				require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
 				return

@@ -237,6 +245,7 @@ func TestCheckTargetConfig(t *testing.T) {
 }
 
 func TestCheckConfigSyntax(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -309,7 +318,8 @@ func TestCheckConfigSyntax(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
-			_, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
+			t.Parallel()
+			_, _, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
 			expectedErrMsg := test.err
 			if strings.Contains(runtime.GOOS, "windows") {
 				expectedErrMsg = test.errWindows

@@ -324,6 +334,7 @@ func TestCheckConfigSyntax(t *testing.T) {
 }
 
 func TestAuthorizationConfig(t *testing.T) {
+	t.Parallel()
 	cases := []struct {
 		name string
 		file string

@@ -343,7 +354,8 @@ func TestAuthorizationConfig(t *testing.T) {
 
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
-			_, err := checkConfig(false, "testdata/"+test.file, false)
+			t.Parallel()
+			_, _, err := checkConfig(false, "testdata/"+test.file, false)
 			if test.err != "" {
 				require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
 				return

@@ -357,6 +369,7 @@ func TestCheckMetricsExtended(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		t.Skip("Skipping on windows")
 	}
+	t.Parallel()
 
 	f, err := os.Open("testdata/metrics-test.prom")
 	require.NoError(t, err)

@@ -393,6 +406,7 @@ func TestExitCodes(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
+	t.Parallel()
 
 	for _, c := range []struct {
 		file string

@@ -417,8 +431,10 @@ func TestExitCodes(t *testing.T) {
 		},
 	} {
 		t.Run(c.file, func(t *testing.T) {
+			t.Parallel()
 			for _, lintFatal := range []bool{true, false} {
 				t.Run(strconv.FormatBool(lintFatal), func(t *testing.T) {
+					t.Parallel()
 					args := []string{"-test.main", "check", "config", "testdata/" + c.file}
 					if lintFatal {
 						args = append(args, "--lint-fatal")

@@ -449,6 +465,7 @@ func TestDocumentation(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		t.SkipNow()
 	}
+	t.Parallel()
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()

@@ -491,7 +508,7 @@ func TestCheckRules(t *testing.T) {
 		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
 		os.Stdin = r
 
-		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
+		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
 		require.Equal(t, successExitCode, exitCode, "")
 	})

@@ -513,7 +530,7 @@ func TestCheckRules(t *testing.T) {
 		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
 		os.Stdin = r
 
-		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false))
+		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
 		require.Equal(t, failureExitCode, exitCode, "")
 	})

@@ -535,32 +552,79 @@ func TestCheckRules(t *testing.T) {
 		defer func(v *os.File) { os.Stdin = v }(os.Stdin)
 		os.Stdin = r
 
-		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true))
+		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false))
 		require.Equal(t, lintErrExitCode, exitCode, "")
 	})
 }
 
 func TestCheckRulesWithRuleFiles(t *testing.T) {
 	t.Run("rules-good", func(t *testing.T) {
-		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules.yml")
+		t.Parallel()
+		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml")
 		require.Equal(t, successExitCode, exitCode, "")
 	})
 
 	t.Run("rules-bad", func(t *testing.T) {
-		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, false), "./testdata/rules-bad.yml")
+		t.Parallel()
+		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml")
 		require.Equal(t, failureExitCode, exitCode, "")
 	})
 
 	t.Run("rules-lint-fatal", func(t *testing.T) {
-		exitCode := CheckRules(newLintConfig(lintOptionDuplicateRules, true), "./testdata/prometheus-rules.lint.yml")
+		t.Parallel()
+		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml")
 		require.Equal(t, lintErrExitCode, exitCode, "")
 	})
 }
 
+func TestCheckScrapeConfigs(t *testing.T) {
+	for _, tc := range []struct {
+		name          string
+		lookbackDelta model.Duration
+		expectError   bool
+	}{
+		{
+			name:          "scrape interval less than lookback delta",
+			lookbackDelta: model.Duration(11 * time.Minute),
+			expectError:   false,
+		},
+		{
+			name:          "scrape interval greater than lookback delta",
+			lookbackDelta: model.Duration(5 * time.Minute),
+			expectError:   true,
+		},
+		{
+			name:          "scrape interval same as lookback delta",
+			lookbackDelta: model.Duration(10 * time.Minute),
+			expectError:   true,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			// Non-fatal linting.
+			code := CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, false, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
+			require.Equal(t, successExitCode, code, "Non-fatal linting should return success")
+			// Fatal linting.
+			code = CheckConfig(false, false, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
if tc.expectError {
|
||||
require.Equal(t, lintErrExitCode, code, "Fatal linting should return error")
|
||||
} else {
|
||||
require.Equal(t, successExitCode, code, "Fatal linting should return success when there are no problems")
|
||||
}
|
||||
// Check syntax only, no linting.
|
||||
code = CheckConfig(false, true, newConfigLintConfig(lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
require.Equal(t, successExitCode, code, "Fatal linting should return success when checking syntax only")
|
||||
// Lint option "none" should disable linting.
|
||||
code = CheckConfig(false, false, newConfigLintConfig(lintOptionNone+","+lintOptionTooLongScrapeInterval, true, false, tc.lookbackDelta), "./testdata/prometheus-config.lint.too_long_scrape_interval.yml")
|
||||
require.Equal(t, successExitCode, code, `Fatal linting should return success when lint option "none" is specified`)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTSDBDumpCommand(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
t.Parallel()
|
||||
|
||||
storage := promqltest.LoadedStorage(t, `
|
||||
load 1m
|
||||
|
@ -593,6 +657,7 @@ func TestTSDBDumpCommand(t *testing.T) {
|
|||
},
|
||||
} {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
args := []string{"-test.main", "tsdb", c.subCmd, storage.Dir()}
|
||||
cmd := exec.Command(promtoolPath, args...)
|
||||
require.NoError(t, cmd.Run())
|
||||
|
|
|
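Note: the recurring edit in the promtool test hunks above is mechanical parallelization — every test and subtest gains a t.Parallel() call while checkConfig/CheckRules calls are updated for their new signatures. A self-contained sketch of the subtest-parallelization idiom (names here are illustrative, not taken from the diff):

package example

import "testing"

// TestParallelPattern mirrors the structure the promtool tests adopt above:
// the parent test and each subtest opt into Go's shared parallel test pool.
func TestParallelPattern(t *testing.T) {
	t.Parallel()
	for _, name := range []string{"good", "bad"} {
		t.Run(name, func(t *testing.T) {
			// Parallel subtests run concurrently with their siblings; the
			// enclosing loop finishes before any of them executes.
			t.Parallel()
		})
	}
}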
@@ -69,7 +69,7 @@ func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient q
 
 // loadGroups parses groups from a list of recording rule files.
 func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string) (errs []error) {
-	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, filenames...)
+	groups, errs := importer.ruleManager.LoadGroups(importer.config.evalInterval, labels.Labels{}, "", nil, false, filenames...)
 	if errs != nil {
 		return errs
 	}
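Note: rules.Manager.LoadGroups now takes an extra boolean just before the variadic file list; judging from the promtool unittest changes later in this commit, it is the ignore-unknown-fields toggle. A compilable sketch under that assumption:

package example

import (
	"time"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/rules"
)

// loadGroupsSketch shows the new LoadGroups arity used above. The added
// boolean (assumed to mean "ignore unknown YAML fields") sits just before
// the rule-file names; false keeps the previous strict behavior.
func loadGroupsSketch(mgr *rules.Manager) []error {
	_, errs := mgr.LoadGroups(time.Minute, labels.Labels{}, "", nil, false, "rules.yml")
	return errs
}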
@@ -43,6 +43,7 @@ const defaultBlockDuration = time.Duration(tsdb.DefaultBlockDuration) * time.Mil
 
 // TestBackfillRuleIntegration is an integration test that runs all the rule importer code to confirm the parts work together.
 func TestBackfillRuleIntegration(t *testing.T) {
+	t.Parallel()
 	const (
 		testMaxSampleCount = 50
 		testValue          = 123
@@ -72,6 +73,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 	}
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
 			tmpDir := t.TempDir()
 			ctx := context.Background()
 
@@ -210,6 +212,7 @@ func createMultiRuleTestFiles(path string) error {
 // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
 // received from Prometheus Query API, including the __name__ label.
 func TestBackfillLabels(t *testing.T) {
+	t.Parallel()
 	tmpDir := t.TempDir()
 	ctx := context.Background()
 
@@ -251,6 +254,7 @@ func TestBackfillLabels(t *testing.T) {
 	require.NoError(t, err)
 
 	t.Run("correct-labels", func(t *testing.T) {
+		t.Parallel()
 		selectedSeries := q.Select(ctx, false, nil, labels.MustNewMatcher(labels.MatchRegexp, "", ".*"))
 		for selectedSeries.Next() {
 			series := selectedSeries.At()
@@ -41,7 +41,7 @@ type sdCheckResult struct {
 func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
 	logger := promslog.New(&promslog.Config{})
 
-	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
+	cfg, err := config.LoadFile(sdConfigFiles, false, logger)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "Cannot load config", err)
 		return failureExitCode
@@ -144,7 +144,9 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
 		}
 	}
 
-	res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
+	scrape.PopulateDiscoveredLabels(lb, scrapeConfig, target, targetGroup.Labels)
+	orig := lb.Labels()
+	res, err := scrape.PopulateLabels(lb, scrapeConfig, target, targetGroup.Labels)
 	result := sdCheckResult{
 		DiscoveredLabels: orig,
 		Labels:           res,
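Note: the hunk above replaces the single PopulateLabels call with an explicit two-step flow — discovered labels are materialized before relabeling instead of being returned alongside the result. A sketch of the new sequence; the parameter types are inferred from the surrounding promtool code and are an assumption, not verified against the scrape package:

package example

import (
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
)

// labelSketch mirrors the call order used in getSDCheckResult above.
func labelSketch(lb *labels.Builder, cfg *config.ScrapeConfig, target, groupLabels model.LabelSet) (res, orig labels.Labels, err error) {
	scrape.PopulateDiscoveredLabels(lb, cfg, target, groupLabels) // fill in pre-relabeling labels
	orig = lb.Labels()                                            // snapshot them explicitly
	res, err = scrape.PopulateLabels(lb, cfg, target, groupLabels) // then apply relabeling
	return res, orig, err
}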
@@ -29,6 +29,7 @@ import (
 )
 
 func TestSDCheckResult(t *testing.T) {
+	t.Parallel()
 	targetGroups := []*targetgroup.Group{{
 		Targets: []model.LabelSet{
 			map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"},
@@ -6,7 +6,7 @@ scrape_configs:
 alerting:
   alertmanagers:
     - scheme: http
-      api_version: v1
+      api_version: v2
       file_sd_configs:
         - files:
             - nonexistent_file.yml
3 cmd/promtool/testdata/prometheus-config.lint.too_long_scrape_interval.yml vendored Normal file

@@ -0,0 +1,3 @@
+scrape_configs:
+  - job_name: too_long_scrape_interval_test
+    scrape_interval: 10m
33 cmd/promtool/testdata/rules_extrafields.yml vendored Normal file

@@ -0,0 +1,33 @@
+# This is the rules file. It has an extra "ownership"
+# field in the second group. promtool should ignore this field
+# and not return an error with --ignore-unknown-fields.
+
+groups:
+  - name: alerts
+    namespace: "foobar"
+    rules:
+      - alert: InstanceDown
+        expr: up == 0
+        for: 5m
+        labels:
+          severity: page
+        annotations:
+          summary: "Instance {{ $labels.instance }} down"
+          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes."
+      - alert: AlwaysFiring
+        expr: 1
+
+  - name: rules
+    ownership:
+      service: "test"
+    rules:
+      - record: job:test:count_over_time1m
+        expr: sum without(instance) (count_over_time(test[1m]))
+
+      # A recording rule that doesn't depend on input series.
+      - record: fixed_data
+        expr: 1
+
+      # Subquery with default resolution test.
+      - record: suquery_interval_test
+        expr: count_over_time(up[5m:])
21 cmd/promtool/testdata/rules_run_extrafields.yml vendored Normal file

@@ -0,0 +1,21 @@
+# Minimal test case to see that --ignore-unknown-fields
+# is working as expected. It should not return an error
+# when any extra fields are present in the rules file.
+rule_files:
+  - rules_extrafields.yml
+
+evaluation_interval: 1m
+
+
+tests:
+  - name: extra ownership field test
+    input_series:
+      - series: test
+        values: 1
+
+    promql_expr_test:
+      - expr: test
+        eval_time: 0
+        exp_samples:
+          - value: 1
+            labels: test
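Note: the two testdata files above exercise the new --ignore-unknown-fields behavior. Under the hood this amounts to strict versus lenient YAML decoding; a minimal, self-contained sketch with gopkg.in/yaml.v2 (illustrative only, not promtool's actual code path):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// The "ownership" field above is not part of the rule-group schema. A strict
// decode rejects it, a lenient decode silently drops it — which is, in
// essence, what the new switch toggles.
type group struct {
	Name string `yaml:"name"`
}

func main() {
	doc := []byte("name: rules\nownership:\n  service: test\n")
	var g group
	fmt.Println(yaml.UnmarshalStrict(doc, &g)) // error: field ownership not found
	fmt.Println(yaml.Unmarshal(doc, &g))       // <nil>: unknown field ignored
}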
@@ -315,12 +315,11 @@ func readPrometheusLabels(r io.Reader, n int) ([]labels.Labels, error) {
 	i := 0
 
 	for scanner.Scan() && i < n {
-		m := make([]labels.Label, 0, 10)
-
 		r := strings.NewReplacer("\"", "", "{", "", "}", "")
 		s := r.Replace(scanner.Text())
 
 		labelChunks := strings.Split(s, ",")
+		m := make([]labels.Label, 0, len(labelChunks))
 		for _, labelChunk := range labelChunks {
 			split := strings.Split(labelChunk, ":")
 			m = append(m, labels.Label{Name: split[0], Value: split[1]})
@@ -405,7 +404,7 @@ func openBlock(path, blockID string) (*tsdb.DBReadOnly, tsdb.BlockReader, error)
 		}
 	}
 
-	b, err := db.Block(blockID)
+	b, err := db.Block(blockID, tsdb.DefaultPostingsDecoderFactory)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -589,7 +588,10 @@ func analyzeBlock(ctx context.Context, path, blockID string, limit int, runExten
 	if err != nil {
 		return err
 	}
-	postings = index.Intersect(postings, index.NewListPostings(refs))
+	// Only intersect postings if matchers are specified.
+	if len(matchers) > 0 {
+		postings = index.Intersect(postings, index.NewListPostings(refs))
+	}
 	count := 0
 	for postings.Next() {
 		count++
@@ -662,7 +664,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 			histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
 			fhchk, ok := chk.(*chunkenc.FloatHistogramChunk)
 			if !ok {
-				return fmt.Errorf("chunk is not FloatHistogramChunk")
+				return errors.New("chunk is not FloatHistogramChunk")
 			}
 			it := fhchk.Iterator(nil)
 			bucketCount := 0
@@ -677,7 +679,7 @@ func analyzeCompaction(ctx context.Context, block tsdb.BlockReader, indexr tsdb.
 			histogramChunkSize = append(histogramChunkSize, len(chk.Bytes()))
 			hchk, ok := chk.(*chunkenc.HistogramChunk)
 			if !ok {
-				return fmt.Errorf("chunk is not HistogramChunk")
+				return errors.New("chunk is not HistogramChunk")
 			}
 			it := hchk.Iterator(nil)
 			bucketCount := 0
@@ -733,7 +735,7 @@ func dumpSamples(ctx context.Context, dbDir, sandboxDirRoot string, mint, maxt i
 		for _, mset := range matcherSets {
 			sets = append(sets, q.Select(ctx, true, nil, mset...))
 		}
-		ss = storage.NewMergeSeriesSet(sets, storage.ChainedSeriesMerge)
+		ss = storage.NewMergeSeriesSet(sets, 0, storage.ChainedSeriesMerge)
 	} else {
 		ss = q.Select(ctx, false, nil, matcherSets[0]...)
 	}
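Note: two mechanical API shifts run through the tsdb.go hunks above: storage.NewMergeSeriesSet gains a numeric argument (passed as 0 here; the diff alone does not say what it bounds), and constant error messages move from fmt.Errorf to errors.New. A minimal sketch of the latter, which linters commonly flag:

package example

import (
	"errors"
	"fmt"
)

// A constant message needs no formatting machinery, so errors.New replaces
// fmt.Errorf wherever there are no format verbs.
var (
	before = fmt.Errorf("chunk is not HistogramChunk") // works, but pointless formatting
	after  = errors.New("chunk is not HistogramChunk") // equivalent and idiomatic
)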
@@ -32,6 +32,7 @@ import (
 )
 
 func TestGenerateBucket(t *testing.T) {
+	t.Parallel()
 	tcs := []struct {
 		min, max         int
 		start, end, step int
@@ -46,11 +46,11 @@ import (
 
 // RulesUnitTest does unit testing of rules based on the unit testing files provided.
 // More info about the file format can be found in the docs.
-func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
-	return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, files...)
+func RulesUnitTest(queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
+	return RulesUnitTestResult(io.Discard, queryOpts, runStrings, diffFlag, debug, ignoreUnknownFields, files...)
 }
 
-func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag bool, files ...string) int {
+func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts, runStrings []string, diffFlag, debug, ignoreUnknownFields bool, files ...string) int {
 	failed := false
 	junit := &junitxml.JUnitXML{}
 
@@ -60,7 +60,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
 	}
 
 	for _, f := range files {
-		if errs := ruleUnitTest(f, queryOpts, run, diffFlag, junit.Suite(f)); errs != nil {
+		if errs := ruleUnitTest(f, queryOpts, run, diffFlag, debug, ignoreUnknownFields, junit.Suite(f)); errs != nil {
 			fmt.Fprintln(os.Stderr, " FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())
@@ -82,7 +82,7 @@ func RulesUnitTestResult(results io.Writer, queryOpts promqltest.LazyLoaderOpts,
 	return successExitCode
 }
 
-func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag bool, ts *junitxml.TestSuite) []error {
+func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *regexp.Regexp, diffFlag, debug, ignoreUnknownFields bool, ts *junitxml.TestSuite) []error {
 	b, err := os.ReadFile(filename)
 	if err != nil {
 		ts.Abort(err)
@@ -131,7 +131,7 @@ func ruleUnitTest(filename string, queryOpts promqltest.LazyLoaderOpts, run *reg
 		if t.Interval == 0 {
 			t.Interval = unitTestInp.EvaluationInterval
 		}
-		ers := t.test(evalInterval, groupOrderMap, queryOpts, diffFlag, unitTestInp.RuleFiles...)
+		ers := t.test(testname, evalInterval, groupOrderMap, queryOpts, diffFlag, debug, ignoreUnknownFields, unitTestInp.RuleFiles...)
 		if ers != nil {
 			for _, e := range ers {
 				tc.Fail(e.Error())
@@ -198,7 +198,14 @@ type testGroup struct {
 }
 
 // test performs the unit tests.
-func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag bool, ruleFiles ...string) (outErr []error) {
+func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrderMap map[string]int, queryOpts promqltest.LazyLoaderOpts, diffFlag, debug, ignoreUnknownFields bool, ruleFiles ...string) (outErr []error) {
+	if debug {
+		testStart := time.Now()
+		fmt.Printf("DEBUG: Starting test %s\n", testname)
+		defer func() {
+			fmt.Printf("DEBUG: Test %s finished, took %v\n", testname, time.Since(testStart))
+		}()
+	}
 	// Setup testing suite.
 	suite, err := promqltest.NewLazyLoader(tg.seriesLoadingString(), queryOpts)
 	if err != nil {
@@ -221,7 +228,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 		Logger: promslog.NewNopLogger(),
 	}
 	m := rules.NewManager(opts)
-	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)
+	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ignoreUnknownFields, ruleFiles...)
 	if ers != nil {
 		return ers
 	}
@@ -482,6 +489,32 @@ Outer:
 		}
 	}
 
+	if debug {
+		ts := tg.maxEvalTime()
+		// Potentially a test can be specified at a time with fractional seconds,
+		// which PromQL cannot represent, so round up to the next whole second.
+		ts = (ts + time.Second).Truncate(time.Second)
+		expr := fmt.Sprintf(`{__name__=~".+"}[%v]`, ts)
+		q, err := suite.QueryEngine().NewInstantQuery(context.Background(), suite.Queryable(), nil, expr, mint.Add(ts))
+		if err != nil {
+			fmt.Printf("DEBUG: Failed querying, expr: %q, err: %v\n", expr, err)
+			return errs
+		}
+		res := q.Exec(suite.Context())
+		if res.Err != nil {
+			fmt.Printf("DEBUG: Failed query exec, expr: %q, err: %v\n", expr, res.Err)
+			return errs
+		}
+		switch v := res.Value.(type) {
+		case promql.Matrix:
+			fmt.Printf("DEBUG: Dump of all data (input_series and rules) at %v:\n", ts)
+			fmt.Println(v.String())
+		default:
+			fmt.Printf("DEBUG: Got unexpected type %T\n", v)
+			return errs
+		}
+	}
+
 	if len(errs) > 0 {
 		return errs
 	}
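Note: the debug branch added above dumps every series at the group's maximum evaluation offset, rounded up to a whole second first (the source's own comment explains PromQL cannot represent the fractional value here). A small worked example of that rounding arithmetic:

package main

import (
	"fmt"
	"time"
)

// (ts + 1s).Truncate(1s) always lands on the NEXT whole second, even when
// ts is already whole — matching the "round up" comment in the hunk above.
func main() {
	for _, ts := range []time.Duration{90*time.Second + 500*time.Millisecond, 90 * time.Second} {
		fmt.Println(ts, "->", (ts + time.Second).Truncate(time.Second))
		// 1m30.5s -> 1m31s
		// 1m30s   -> 1m31s
	}
}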
@@ -26,6 +26,7 @@ import (
 )
 
 func TestRulesUnitTest(t *testing.T) {
+	t.Parallel()
 	type args struct {
 		files []string
 	}
@@ -141,14 +142,16 @@ func TestRulesUnitTest(t *testing.T) {
 			reuseCount[tt.want] += len(tt.args.files)
 		}
 		t.Run(tt.name, func(t *testing.T) {
-			if got := RulesUnitTest(tt.queryOpts, nil, false, tt.args.files...); got != tt.want {
+			t.Parallel()
+			if got := RulesUnitTest(tt.queryOpts, nil, false, false, false, tt.args.files...); got != tt.want {
 				t.Errorf("RulesUnitTest() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 	t.Run("Junit xml output ", func(t *testing.T) {
+		t.Parallel()
 		var buf bytes.Buffer
-		if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, reuseFiles...); got != 1 {
+		if got := RulesUnitTestResult(&buf, promqltest.LazyLoaderOpts{}, nil, false, false, false, reuseFiles...); got != 1 {
 			t.Errorf("RulesUnitTestResults() = %v, want 1", got)
 		}
 		var test junitxml.JUnitXML
@@ -185,15 +188,17 @@ func TestRulesUnitTest(t *testing.T) {
 }
 
 func TestRulesUnitTestRun(t *testing.T) {
+	t.Parallel()
 	type args struct {
 		run   []string
 		files []string
 	}
 	tests := []struct {
-		name      string
-		args      args
-		queryOpts promqltest.LazyLoaderOpts
-		want      int
+		name                string
+		args                args
+		queryOpts           promqltest.LazyLoaderOpts
+		want                int
+		ignoreUnknownFields bool
 	}{
 		{
 			name: "Test all without run arg",
@@ -227,10 +232,19 @@ func TestRulesUnitTestRun(t *testing.T) {
 			},
 			want: 1,
 		},
+		{
+			name: "Test all with extra fields",
+			args: args{
+				files: []string{"./testdata/rules_run_extrafields.yml"},
+			},
+			ignoreUnknownFields: true,
+			want:                0,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got := RulesUnitTest(tt.queryOpts, tt.args.run, false, tt.args.files...)
+			t.Parallel()
+			got := RulesUnitTest(tt.queryOpts, tt.args.run, false, false, tt.ignoreUnknownFields, tt.args.files...)
 			require.Equal(t, tt.want, got)
 		})
 	}
149 config/config.go

@@ -17,6 +17,7 @@ import (
 	"errors"
 	"fmt"
 	"log/slog"
+	"mime"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -29,7 +30,7 @@ import (
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/common/sigv4"
+	"github.com/prometheus/sigv4"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -72,7 +73,7 @@ const (
 )
 
 // Load parses the YAML input s into a Config.
-func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
+func Load(s string, logger *slog.Logger) (*Config, error) {
 	cfg := &Config{}
 	// If the entire config body is empty the UnmarshalYAML method is
 	// never called. We thus have to set the DefaultConfig at the entry
@@ -84,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, er
 		return nil, err
 	}
 
-	if !expandExternalLabels {
-		return cfg, nil
-	}
-
 	b := labels.NewScratchBuilder(0)
 	cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) {
 		newV := os.Expand(v.Value, func(s string) string {
@@ -106,17 +103,32 @@ func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, er
 		// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
 		b.Add(v.Name, newV)
 	})
-	cfg.GlobalConfig.ExternalLabels = b.Labels()
+	if !b.Labels().IsEmpty() {
+		cfg.GlobalConfig.ExternalLabels = b.Labels()
+	}
+
+	switch cfg.OTLPConfig.TranslationStrategy {
+	case UnderscoreEscapingWithSuffixes:
+	case "":
+	case NoUTF8EscapingWithSuffixes:
+		if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig {
+			return nil, errors.New("OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled")
+		}
+	default:
+		return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy)
+	}
+	cfg.loaded = true
 	return cfg, nil
 }
 
-// LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
+// LoadFile parses and validates the given YAML file into a read-only Config.
+// Callers should never write to or shallow copy the returned Config.
+func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) {
 	content, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err
 	}
-	cfg, err := Load(string(content), expandExternalLabels, logger)
+	cfg, err := Load(string(content), logger)
 	if err != nil {
 		return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err)
 	}
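Note: with this hunk, Load and LoadFile drop the expandExternalLabels flag (expansion now always runs inside Load) and stamp cfg.loaded, which later gates GetScrapeConfigs. A hedged usage sketch of the new signatures:

package example

import (
	"log/slog"

	"github.com/prometheus/prometheus/config"
)

// loadConfigSketch shows the post-change entry points as used throughout
// the test updates below; "prometheus.yml" is a placeholder path.
func loadConfigSketch(logger *slog.Logger) error {
	cfg, err := config.LoadFile("prometheus.yml", false /* agentMode */, logger)
	if err != nil {
		return err
	}
	_, err = cfg.GetScrapeConfigs() // safe: cfg came from LoadFile, so loaded is set
	return err
}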
@@ -165,13 +177,13 @@ var (
 	// DefaultScrapeConfig is the default scrape configuration.
 	DefaultScrapeConfig = ScrapeConfig{
 		// ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals.
-		ScrapeClassicHistograms: false,
-		MetricsPath:             "/metrics",
-		Scheme:                  "http",
-		HonorLabels:             false,
-		HonorTimestamps:         true,
-		HTTPClientConfig:        config.DefaultHTTPClientConfig,
-		EnableCompression:       true,
+		AlwaysScrapeClassicHistograms: false,
+		MetricsPath:                   "/metrics",
+		Scheme:                        "http",
+		HonorLabels:                   false,
+		HonorTimestamps:               true,
+		HTTPClientConfig:              config.DefaultHTTPClientConfig,
+		EnableCompression:             true,
 	}
 
 	// DefaultAlertmanagerConfig is the default alertmanager configuration.
@@ -182,13 +194,18 @@ var (
 		HTTPClientConfig: config.DefaultHTTPClientConfig,
 	}
 
+	DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{
+		FollowRedirects: true,
+		EnableHTTP2:     false,
+	}
+
 	// DefaultRemoteWriteConfig is the default remote write configuration.
 	DefaultRemoteWriteConfig = RemoteWriteConfig{
 		RemoteTimeout:    model.Duration(30 * time.Second),
 		ProtobufMessage:  RemoteWriteProtoMsgV1,
 		QueueConfig:      DefaultQueueConfig,
 		MetadataConfig:   DefaultMetadataConfig,
-		HTTPClientConfig: config.DefaultHTTPClientConfig,
+		HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig,
 	}
 
 	// DefaultQueueConfig is the default remote queue configuration.
@@ -235,7 +252,9 @@ var (
 	}
 
 	// DefaultOTLPConfig is the default OTLP configuration.
-	DefaultOTLPConfig = OTLPConfig{}
+	DefaultOTLPConfig = OTLPConfig{
+		TranslationStrategy: UnderscoreEscapingWithSuffixes,
+	}
 )
 
 // Config is the top-level configuration for Prometheus's config files.
@@ -252,9 +271,12 @@ type Config struct {
 	RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"`
 	RemoteReadConfigs  []*RemoteReadConfig  `yaml:"remote_read,omitempty"`
 	OTLPConfig         OTLPConfig           `yaml:"otlp,omitempty"`
+
+	loaded bool // Certain methods require configuration to use Load validation.
 }
 
 // SetDirectory joins any relative file paths with dir.
+// This method writes to config, and it's not concurrency safe.
 func (c *Config) SetDirectory(dir string) {
 	c.GlobalConfig.SetDirectory(dir)
 	c.AlertingConfig.SetDirectory(dir)
@@ -284,24 +306,26 @@ func (c Config) String() string {
 	return string(b)
 }
 
-// GetScrapeConfigs returns the scrape configurations.
+// GetScrapeConfigs returns the read-only, validated scrape configurations including
+// the ones from the scrape_config_files.
+// This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency).
+// This method also assumes the Config was created by Load or LoadFile function, it returns error
+// if it was not. We can't re-validate or apply globals here due to races,
+// read more https://github.com/prometheus/prometheus/issues/15538.
 func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
-	scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
+	if !c.loaded {
+		// Programmatic error, we warn before more confusing errors would happen due to lack of the globalization.
+		return nil, errors.New("scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
+	}
 
+	scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs))
 	jobNames := map[string]string{}
 	for i, scfg := range c.ScrapeConfigs {
-		// We do these checks for library users that would not call validate in
-		// Unmarshal.
-		if err := scfg.Validate(c.GlobalConfig); err != nil {
-			return nil, err
-		}
-
 		if _, ok := jobNames[scfg.JobName]; ok {
 			return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
 		}
 		jobNames[scfg.JobName] = "main config file"
 		scfgs[i] = scfg
 	}
 
+	// Re-read and validate the dynamic scrape config rules.
 	for _, pat := range c.ScrapeConfigFiles {
 		fs, err := filepath.Glob(pat)
 		if err != nil {
@@ -337,6 +361,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) {
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
+// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead.
 func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	*c = DefaultConfig
 	// We want to set c to the defaults and then overwrite it with the input.
@@ -373,18 +398,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		}
 	}
 
-	// Do global overrides and validate unique names.
+	// Do global overrides and validation.
 	jobNames := map[string]struct{}{}
 	for _, scfg := range c.ScrapeConfigs {
 		if err := scfg.Validate(c.GlobalConfig); err != nil {
 			return err
 		}
 
 		if _, ok := jobNames[scfg.JobName]; ok {
 			return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName)
 		}
 		jobNames[scfg.JobName] = struct{}{}
 	}
 
 	rwNames := map[string]struct{}{}
 	for _, rwcfg := range c.RemoteWriteConfigs {
 		if rwcfg == nil {
@@ -475,9 +500,22 @@ func (s ScrapeProtocol) Validate() error {
 	return nil
 }
 
+// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol.
+func (s ScrapeProtocol) HeaderMediaType() string {
+	if _, ok := ScrapeProtocolsHeaders[s]; !ok {
+		return ""
+	}
+	mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s])
+	if err != nil {
+		return ""
+	}
+	return mediaType
+}
+
 var (
 	PrometheusProto      ScrapeProtocol = "PrometheusProto"
 	PrometheusText0_0_4  ScrapeProtocol = "PrometheusText0.0.4"
+	PrometheusText1_0_0  ScrapeProtocol = "PrometheusText1.0.0"
 	OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1"
 	OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0"
 	UTF8NamesHeader      string         = model.EscapingKey + "=" + model.AllowUTF8
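Note: a short sketch of the new HeaderMediaType helper added above. It parses the registered Content-Type header for a protocol and returns only the media type, or the empty string for unknown protocols; the expected values match the test table added later in this diff:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// "text/plain;version=1.0.0;escaping=allow-utf-8" reduces to "text/plain".
	fmt.Println(config.PrometheusText1_0_0.HeaderMediaType())            // text/plain
	fmt.Println(config.ScrapeProtocol("bogus").HeaderMediaType() == "") // true
}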
@@ -485,6 +523,7 @@ var (
 	ScrapeProtocolsHeaders = map[ScrapeProtocol]string{
 		PrometheusProto:      "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited",
 		PrometheusText0_0_4:  "text/plain;version=0.0.4",
+		PrometheusText1_0_0:  "text/plain;version=1.0.0;escaping=allow-utf-8",
 		OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1",
 		OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0",
 	}
@@ -494,6 +533,7 @@ var (
 	DefaultScrapeProtocols = []ScrapeProtocol{
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
+		PrometheusText1_0_0,
 		PrometheusText0_0_4,
 	}
 
@@ -505,6 +545,7 @@ var (
 		PrometheusProto,
 		OpenMetricsText1_0_0,
 		OpenMetricsText0_0_1,
+		PrometheusText1_0_0,
 		PrometheusText0_0_4,
 	}
 )
@@ -631,10 +672,17 @@ type ScrapeConfig struct {
 	// The protocols to negotiate during a scrape. It tells clients what
 	// protocol are accepted by Prometheus and with what preference (most wanted is first).
 	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
-	// OpenMetricsText1.0.0, PrometheusText0.0.4.
+	// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
 	ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"`
-	// Whether to scrape a classic histogram that is also exposed as a native histogram.
-	ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"`
+	// The fallback protocol to use if the Content-Type provided by the target
+	// is not provided, blank, or not one of the expected values.
+	// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1,
+	// OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4.
+	ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"`
+	// Whether to scrape a classic histogram, even if it is also exposed as a native histogram.
+	AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"`
+	// Whether to convert all scraped classic histograms into a native histogram with custom buckets.
+	ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"`
 	// File to which scrape failures are logged.
 	ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"`
 	// The HTTP resource path on which to fetch metrics from targets.
@@ -782,6 +830,12 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error {
 		return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName)
 	}
 
+	if c.ScrapeFallbackProtocol != "" {
+		if err := c.ScrapeFallbackProtocol.Validate(); err != nil {
+			return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err)
+		}
+	}
+
 	switch globalConfig.MetricNameValidationScheme {
 	case LegacyValidationConfig:
 	case "", UTF8ValidationConfig:
@@ -957,6 +1011,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig {
 
 // AlertmanagerAPIVersion represents a version of the
 // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'.
+// 'v1' is no longer supported.
 type AlertmanagerAPIVersion string
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -986,7 +1041,7 @@ const (
 )
 
 var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{
-	AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2,
+	AlertmanagerAPIVersionV2,
 }
 
 // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with.
@@ -1038,7 +1093,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er
 		c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil
 
 	if httpClientConfigAuthEnabled && c.SigV4Config != nil {
-		return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
+		return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured")
 	}
 
 	// Check for users putting URLs in target groups.
@@ -1147,6 +1202,7 @@ type RemoteWriteConfig struct {
 	Name                 string `yaml:"name,omitempty"`
 	SendExemplars        bool   `yaml:"send_exemplars,omitempty"`
 	SendNativeHistograms bool   `yaml:"send_native_histograms,omitempty"`
+	RoundRobinDNS        bool   `yaml:"round_robin_dns,omitempty"`
 	// ProtobufMessage specifies the protobuf message to use against the remote
 	// receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/
 	ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"`
@@ -1368,9 +1424,24 @@ func getGoGCEnv() int {
 	return DefaultRuntimeConfig.GoGC
 }
 
+type translationStrategyOption string
+
+var (
+	// NoUTF8EscapingWithSuffixes will accept metric/label names as they are.
+	// Unit and type suffixes may be added to metric names, according to certain rules.
+	NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes"
+	// UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus.
+	// This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores,
+	// and label name characters that are not alphanumerics/underscores to underscores.
+	// Unit and type suffixes may be appended to metric names, according to certain rules.
+	UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes"
+)
+
 // OTLPConfig is the configuration for writing to the OTLP endpoint.
 type OTLPConfig struct {
-	PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"`
+	PromoteResourceAttributes        []string                  `yaml:"promote_resource_attributes,omitempty"`
+	TranslationStrategy              translationStrategyOption `yaml:"translation_strategy,omitempty"`
+	KeepIdentifyingResourceAttributes bool                     `yaml:"keep_identifying_resource_attributes,omitempty"`
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
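Note: a hedged sketch of the new otlp.translation_strategy option being validated at load time, using the post-change Load signature from earlier in this diff; the error text is taken from the hunks above:

package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

// UnderscoreEscapingWithSuffixes (the default) and NoUTF8EscapingWithSuffixes
// are accepted; anything else is rejected when the config is loaded.
func main() {
	cfg, err := config.Load("otlp:\n  translation_strategy: NoUTF8EscapingWithSuffixes\n", promslog.NewNopLogger())
	fmt.Println(cfg.OTLPConfig.TranslationStrategy, err) // NoUTF8EscapingWithSuffixes <nil>

	_, err = config.Load("otlp:\n  translation_strategy: Bogus\n", promslog.NewNopLogger())
	fmt.Println(err) // unsupported OTLP translation strategy "Bogus"
}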
@@ -1386,7 +1457,7 @@ func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	for i, attr := range c.PromoteResourceAttributes {
 		attr = strings.TrimSpace(attr)
 		if attr == "" {
-			err = errors.Join(err, fmt.Errorf("empty promoted OTel resource attribute"))
+			err = errors.Join(err, errors.New("empty promoted OTel resource attribute"))
 			continue
 		}
 		if _, exists := seen[attr]; exists {

@@ -18,6 +18,8 @@ package config
 const ruleFilesConfigFile = "testdata/rules_abs_path.good.yml"
 
 var ruleFilesExpectedConf = &Config{
+	loaded: true,
+
 	GlobalConfig: DefaultGlobalConfig,
 	Runtime:      DefaultRuntimeConfig,
 	RuleFiles: []string{
@@ -87,6 +87,7 @@ const (
 )
 
 var expectedConf = &Config{
+	loaded: true,
 	GlobalConfig: GlobalConfig{
 		ScrapeInterval: model.Duration(15 * time.Second),
 		ScrapeTimeout:  DefaultGlobalConfig.ScrapeTimeout,
@@ -142,7 +143,7 @@ var expectedConf = &Config{
 				},
 			},
 			FollowRedirects: true,
-			EnableHTTP2:     true,
+			EnableHTTP2:     false,
 		},
 	},
 	{
@@ -158,7 +159,7 @@ var expectedConf = &Config{
 				KeyFile:  filepath.FromSlash("testdata/valid_key_file"),
 			},
 			FollowRedirects: true,
-			EnableHTTP2:     true,
+			EnableHTTP2:     false,
 		},
 		Headers: map[string]string{"name": "value"},
 	},
@@ -168,6 +169,7 @@ var expectedConf = &Config{
 		PromoteResourceAttributes: []string{
 			"k8s.cluster.name", "k8s.job.name", "k8s.namespace.name",
 		},
+		TranslationStrategy: UnderscoreEscapingWithSuffixes,
 	},
 
 	RemoteReadConfigs: []*RemoteReadConfig{
@@ -206,19 +208,20 @@ var expectedConf = &Config{
 	{
 		JobName: "prometheus",
 
-		HonorLabels:           true,
-		HonorTimestamps:       true,
-		ScrapeInterval:        model.Duration(15 * time.Second),
-		ScrapeTimeout:         DefaultGlobalConfig.ScrapeTimeout,
-		EnableCompression:     true,
-		BodySizeLimit:         globBodySizeLimit,
-		SampleLimit:           globSampleLimit,
-		TargetLimit:           globTargetLimit,
-		LabelLimit:            globLabelLimit,
-		LabelNameLengthLimit:  globLabelNameLengthLimit,
-		LabelValueLengthLimit: globLabelValueLengthLimit,
-		ScrapeProtocols:       DefaultGlobalConfig.ScrapeProtocols,
-		ScrapeFailureLogFile:  "testdata/fail_prom.log",
+		HonorLabels:            true,
+		HonorTimestamps:        true,
+		ScrapeInterval:         model.Duration(15 * time.Second),
+		ScrapeTimeout:          DefaultGlobalConfig.ScrapeTimeout,
+		EnableCompression:      true,
+		BodySizeLimit:          globBodySizeLimit,
+		SampleLimit:            globSampleLimit,
+		TargetLimit:            globTargetLimit,
+		LabelLimit:             globLabelLimit,
+		LabelNameLengthLimit:   globLabelNameLengthLimit,
+		LabelValueLengthLimit:  globLabelValueLengthLimit,
+		ScrapeProtocols:        DefaultGlobalConfig.ScrapeProtocols,
+		ScrapeFallbackProtocol: PrometheusText0_0_4,
+		ScrapeFailureLogFile:   "testdata/fail_prom.log",
 
 		MetricsPath: DefaultScrapeConfig.MetricsPath,
 		Scheme:      DefaultScrapeConfig.Scheme,
@@ -1500,28 +1503,33 @@ var expectedConf = &Config{
 	},
 }
 
+func TestYAMLNotLongerSupportedAMApi(t *testing.T) {
+	_, err := LoadFile("testdata/config_with_no_longer_supported_am_api_config.yml", false, promslog.NewNopLogger())
+	require.Error(t, err)
+}
+
 func TestYAMLRoundtrip(t *testing.T) {
-	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger())
+	want, err := LoadFile("testdata/roundtrip.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
 
 	require.NoError(t, err)
-	got := &Config{}
-	require.NoError(t, yaml.UnmarshalStrict(out, got))
+
+	got, err := Load(string(out), promslog.NewNopLogger())
+	require.NoError(t, err)
 
 	require.Equal(t, want, got)
 }
 
 func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
-	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger())
+	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
 
 	require.NoError(t, err)
-	got := &Config{}
-	require.NoError(t, yaml.UnmarshalStrict(out, got))
+
+	got, err := Load(string(out), promslog.NewNopLogger())
+	require.NoError(t, err)
 
 	require.True(t, got.RemoteWriteConfigs[0].QueueConfig.RetryOnRateLimit)
 	require.False(t, got.RemoteWriteConfigs[1].QueueConfig.RetryOnRateLimit)
@@ -1529,7 +1537,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 
 func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 	t.Run("good config", func(t *testing.T) {
-		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger())
+		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, promslog.NewNopLogger())
 		require.NoError(t, err)
 
 		out, err := yaml.Marshal(want)
@@ -1541,25 +1549,101 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 	})
 
 	t.Run("bad config", func(t *testing.T) {
-		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger())
+		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, promslog.NewNopLogger())
 		require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
 		require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
 	})
 }
 
+func TestOTLPAllowServiceNameInTargetInfo(t *testing.T) {
+	t.Run("good config", func(t *testing.T) {
+		want, err := LoadFile(filepath.Join("testdata", "otlp_allow_keep_identifying_resource_attributes.good.yml"), false, promslog.NewNopLogger())
+		require.NoError(t, err)
+
+		out, err := yaml.Marshal(want)
+		require.NoError(t, err)
+		var got Config
+		require.NoError(t, yaml.UnmarshalStrict(out, &got))
+
+		require.True(t, got.OTLPConfig.KeepIdentifyingResourceAttributes)
+	})
+}
+
+func TestOTLPAllowUTF8(t *testing.T) {
+	t.Run("good config", func(t *testing.T) {
+		fpath := filepath.Join("testdata", "otlp_allow_utf8.good.yml")
+		verify := func(t *testing.T, conf *Config, err error) {
+			t.Helper()
+			require.NoError(t, err)
+			require.Equal(t, NoUTF8EscapingWithSuffixes, conf.OTLPConfig.TranslationStrategy)
+		}
+
+		t.Run("LoadFile", func(t *testing.T) {
+			conf, err := LoadFile(fpath, false, promslog.NewNopLogger())
+			verify(t, conf, err)
+		})
+		t.Run("Load", func(t *testing.T) {
+			content, err := os.ReadFile(fpath)
+			require.NoError(t, err)
+			conf, err := Load(string(content), promslog.NewNopLogger())
+			verify(t, conf, err)
+		})
+	})
+
+	t.Run("incompatible config", func(t *testing.T) {
+		fpath := filepath.Join("testdata", "otlp_allow_utf8.incompatible.yml")
+		verify := func(t *testing.T, err error) {
+			t.Helper()
+			require.ErrorContains(t, err, `OTLP translation strategy NoUTF8EscapingWithSuffixes is not allowed when UTF8 is disabled`)
+		}
+
+		t.Run("LoadFile", func(t *testing.T) {
+			_, err := LoadFile(fpath, false, promslog.NewNopLogger())
+			verify(t, err)
+		})
+		t.Run("Load", func(t *testing.T) {
+			content, err := os.ReadFile(fpath)
+			require.NoError(t, err)
+			_, err = Load(string(content), promslog.NewNopLogger())
+			t.Log("err", err)
+			verify(t, err)
+		})
+	})
+
+	t.Run("bad config", func(t *testing.T) {
+		fpath := filepath.Join("testdata", "otlp_allow_utf8.bad.yml")
+		verify := func(t *testing.T, err error) {
+			t.Helper()
+			require.ErrorContains(t, err, `unsupported OTLP translation strategy "Invalid"`)
+		}
+
+		t.Run("LoadFile", func(t *testing.T) {
+			_, err := LoadFile(fpath, false, promslog.NewNopLogger())
+			verify(t, err)
+		})
+		t.Run("Load", func(t *testing.T) {
+			content, err := os.ReadFile(fpath)
+			require.NoError(t, err)
+			_, err = Load(string(content), promslog.NewNopLogger())
+			verify(t, err)
+		})
+	})
+}
+
 func TestLoadConfig(t *testing.T) {
 	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
 	// an overwritten default field in the global config permanently changes the default.
-	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger())
+	_, err := LoadFile("testdata/global_timeout.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
-	c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
+
 	require.NoError(t, err)
 	require.Equal(t, expectedConf, c)
 }
 
 func TestScrapeIntervalLarger(t *testing.T) {
-	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger())
+	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Len(t, c.ScrapeConfigs, 1)
 	for _, sc := range c.ScrapeConfigs {
@@ -1569,7 +1653,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
 
 // YAML marshaling must not reveal authentication credentials.
 func TestElideSecrets(t *testing.T) {
-	c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@@ -1586,31 +1670,31 @@ func TestElideSecrets(t *testing.T) {
 
 func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
 	// Parse a valid file that sets a rule files with an absolute path
-	c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger())
+	c, err := LoadFile(ruleFilesConfigFile, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, ruleFilesExpectedConf, c)
 }
 
 func TestKubernetesEmptyAPIServer(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesWithKubeConfig(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesSelectors(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
@@ -2080,12 +2164,20 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "scrape_config_files_scrape_protocols.bad.yml",
-		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4] for scrape config with job name "node"`,
+		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols.bad.yml: scrape_protocols: unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0] for scrape config with job name "node"`,
 	},
 	{
 		filename: "scrape_config_files_scrape_protocols2.bad.yml",
 		errMsg:   `parsing YAML file testdata/scrape_config_files_scrape_protocols2.bad.yml: duplicated protocol in scrape_protocols, got [OpenMetricsText1.0.0 PrometheusProto OpenMetricsText1.0.0] for scrape config with job name "node"`,
 	},
+	{
+		filename: "scrape_config_files_fallback_scrape_protocol1.bad.yml",
+		errMsg:   `parsing YAML file testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml: invalid fallback_scrape_protocol for scrape config with job name "node": unknown scrape protocol prometheusproto, supported: [OpenMetricsText0.0.1 OpenMetricsText1.0.0 PrometheusProto PrometheusText0.0.4 PrometheusText1.0.0]`,
+	},
+	{
+		filename: "scrape_config_files_fallback_scrape_protocol2.bad.yml",
+		errMsg:   `unmarshal errors`,
+	},
 }
 
 func TestBadConfigs(t *testing.T) {
@@ -2094,7 +2186,7 @@ func TestBadConfigs(t *testing.T) {
 		model.NameValidationScheme = model.UTF8Validation
 	}()
 	for _, ee := range expectedErrors {
-		_, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger())
+		_, err := LoadFile("testdata/"+ee.filename, false, promslog.NewNopLogger())
 		require.ErrorContains(t, err, ee.errMsg,
 			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
 	}
@@ -2125,9 +2217,10 @@ func TestBadStaticConfigsYML(t *testing.T) {
 }
 
 func TestEmptyConfig(t *testing.T) {
-	c, err := Load("", false, promslog.NewNopLogger())
+	c, err := Load("", promslog.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
+	exp.loaded = true
 	require.Equal(t, exp, *c)
 }
 
@@ -2135,38 +2228,34 @@ func TestExpandExternalLabels(t *testing.T) {
 	// Cleanup ant TEST env variable that could exist on the system.
 	os.Setenv("TEST", "")
 
-	c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger())
-	require.NoError(t, err)
-	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
-
-	c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
+	c, err := LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
 
 	os.Setenv("TEST", "TestValue")
-	c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
 }
 
 func TestAgentMode(t *testing.T) {
-	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger())
+	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
 
-	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger())
+	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Empty(t, c.RemoteWriteConfigs)
 
-	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger())
+	c, err = LoadFile("testdata/agent_mode.good.yml", true, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Len(t, c.RemoteWriteConfigs, 1)
 	require.Equal(
@@ -2177,10 +2266,11 @@ func TestAgentMode(t *testing.T) {
 }
 
 func TestEmptyGlobalBlock(t *testing.T) {
-	c, err := Load("global:\n", false, promslog.NewNopLogger())
+	c, err := Load("global:\n", promslog.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
 	exp.Runtime = DefaultRuntimeConfig
+	exp.loaded = true
 	require.Equal(t, exp, *c)
 }
 
@@ -2332,7 +2422,7 @@ func TestGetScrapeConfigs(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger())
+			c, err := LoadFile(tc.configFile, false, promslog.NewNopLogger())
 			require.NoError(t, err)
 
 			scfgs, err := c.GetScrapeConfigs()
@@ -2350,7 +2440,7 @@ func kubernetesSDHostURL() config.URL {
 }
 
 func TestScrapeConfigDisableCompression(t *testing.T) {
-	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger())
+	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -2397,7 +2487,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
 
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger())
+			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, promslog.NewNopLogger())
 			require.NoError(t, err)
 
 			out, err := yaml.Marshal(want)
@ -2410,3 +2500,69 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
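The mechanical change running through this test file is a narrower loader signature: LoadFile now takes only a filename, the agent-mode flag, and a logger, and Load drops the same boolean. A minimal caller sketch against the new signatures, assuming the dropped middle argument was the old expand-external-labels toggle (expansion now appears to be unconditional):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// filename, agent mode, logger; the old expand-external-labels boolean is gone.
	c, err := config.LoadFile("prometheus.yml", false, promslog.NewNopLogger())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.GlobalConfig.ExternalLabels)
}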

func TestScrapeProtocolHeader(t *testing.T) {
	tests := []struct {
		name          string
		proto         ScrapeProtocol
		expectedValue string
	}{
		{
			name:          "blank",
			proto:         ScrapeProtocol(""),
			expectedValue: "",
		},
		{
			name:          "invalid",
			proto:         ScrapeProtocol("invalid"),
			expectedValue: "",
		},
		{
			name:          "prometheus protobuf",
			proto:         PrometheusProto,
			expectedValue: "application/vnd.google.protobuf",
		},
		{
			name:          "prometheus text 0.0.4",
			proto:         PrometheusText0_0_4,
			expectedValue: "text/plain",
		},
		{
			name:          "prometheus text 1.0.0",
			proto:         PrometheusText1_0_0,
			expectedValue: "text/plain",
		},
		{
			name:          "openmetrics 0.0.1",
			proto:         OpenMetricsText0_0_1,
			expectedValue: "application/openmetrics-text",
		},
		{
			name:          "openmetrics 1.0.0",
			proto:         OpenMetricsText1_0_0,
			expectedValue: "application/openmetrics-text",
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			mediaType := tc.proto.HeaderMediaType()

			require.Equal(t, tc.expectedValue, mediaType)
		})
	}
}

// Regression test against https://github.com/prometheus/prometheus/issues/15538
func TestGetScrapeConfigs_Loaded(t *testing.T) {
	t.Run("without load", func(t *testing.T) {
		c := &Config{}
		_, err := c.GetScrapeConfigs()
		require.EqualError(t, err, "scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen")
	})
	t.Run("with load", func(t *testing.T) {
		c, err := Load("", promslog.NewNopLogger())
		require.NoError(t, err)
		_, err = c.GetScrapeConfigs()
		require.NoError(t, err)
	})
}
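The table above pins down HeaderMediaType for each scrape protocol, including the empty string for blank or invalid values. A hedged sketch of how a caller might fold those media types into an Accept header; acceptedMediaTypes is a hypothetical helper, not something this diff adds:

package config

// acceptedMediaTypes collects the media types of the given protocols in order,
// skipping unknown or blank protocols, whose HeaderMediaType is "" per the
// table test above.
func acceptedMediaTypes(protos []ScrapeProtocol) []string {
	var out []string
	for _, p := range protos {
		if mt := p.HeaderMediaType(); mt != "" {
			out = append(out, mt)
		}
	}
	return out
}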
@@ -16,6 +16,8 @@ package config
const ruleFilesConfigFile = "testdata/rules_abs_path_windows.good.yml"

var ruleFilesExpectedConf = &Config{
+	loaded:       true,
	GlobalConfig: DefaultGlobalConfig,
	Runtime:      DefaultRuntimeConfig,
	RuleFiles: []string{
config/testdata/conf.good.yml (vendored, 2 changes)
@@ -74,6 +74,8 @@ scrape_configs:
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

+   fallback_scrape_protocol: PrometheusText0.0.4
+
    scrape_failure_log_file: fail_prom.log
    file_sd_configs:
      - files:

config/testdata/config_with_deprecated_am_api_config.yml (vendored, new file)
@@ -0,0 +1,7 @@
alerting:
  alertmanagers:
    - scheme: http
      api_version: v1
      file_sd_configs:
        - files:
            - nonexistent_file.yml

config/testdata/otlp_allow_keep_identifying_resource_attributes.good.yml (vendored, new file)
@@ -0,0 +1,2 @@
otlp:
  keep_identifying_resource_attributes: true

config/testdata/otlp_allow_utf8.bad.yml (vendored, new file)
@@ -0,0 +1,4 @@
global:
  metric_name_validation_scheme: legacy
otlp:
  translation_strategy: Invalid

config/testdata/otlp_allow_utf8.good.yml (vendored, new file)
@@ -0,0 +1,2 @@
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes

config/testdata/otlp_allow_utf8.incompatible.yml (vendored, new file)
@@ -0,0 +1,4 @@
global:
  metric_name_validation_scheme: legacy
otlp:
  translation_strategy: NoUTF8EscapingWithSuffixes

config/testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml (vendored, new file)
@@ -0,0 +1,5 @@
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: "prometheusproto"
    static_configs:
      - targets: ['localhost:8080']

config/testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml (vendored, new file)
@@ -0,0 +1,5 @@
scrape_configs:
  - job_name: node
    fallback_scrape_protocol: ["OpenMetricsText1.0.0", "PrometheusText0.0.4"]
    static_configs:
      - targets: ['localhost:8080']
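The .bad.yml fixtures above exist to be rejected by the loader: one uses an unknown protocol name, the other a list where a scalar is expected. A sketch of how such fixtures are typically exercised, assuming it sits in the config package's test file; the exact error assertions of the real suite are not shown in this diff:

func TestFallbackScrapeProtocolBadFiles(t *testing.T) {
	for _, f := range []string{
		"testdata/scrape_config_files_fallback_scrape_protocol1.bad.yml",
		"testdata/scrape_config_files_fallback_scrape_protocol2.bad.yml",
	} {
		// Both fixtures must fail to load; the message detail is up to the loader.
		_, err := LoadFile(f, false, promslog.NewNopLogger())
		require.Error(t, err)
	}
}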
@@ -30,6 +30,7 @@ import (
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"

@@ -148,7 +149,7 @@ type EC2Discovery struct {
	*refresh.Discovery
	logger *slog.Logger
	cfg    *EC2SDConfig
-	ec2    *ec2.EC2
+	ec2    ec2iface.EC2API

	// azToAZID maps this account's availability zones to their underlying AZ
	// ID, e.g. eu-west-2a -> euw2-az2. Refreshes are performed sequentially, so

@@ -160,7 +161,7 @@ type EC2Discovery struct {
func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
	m, ok := metrics.(*ec2Metrics)
	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
	}

	if logger == nil {

@@ -182,7 +183,7 @@ func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.D
	return d, nil
}

-func (d *EC2Discovery) ec2Client(context.Context) (*ec2.EC2, error) {
+func (d *EC2Discovery) ec2Client(context.Context) (ec2iface.EC2API, error) {
	if d.ec2 != nil {
		return d.ec2, nil
	}
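Swapping the concrete *ec2.EC2 field for the ec2iface.EC2API interface is the seam that makes the new test file below possible: any stub satisfying the interface can stand in for AWS. A minimal sketch of such a stub; stubEC2 is an illustrative name, not part of the diff:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// stubEC2 embeds the interface, so only the methods a test cares about need
// overriding; calling anything else panics, which quickly flags untested paths.
type stubEC2 struct {
	ec2iface.EC2API
}

func (stubEC2) DescribeAvailabilityZonesWithContext(aws.Context, *ec2.DescribeAvailabilityZonesInput, ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
	return &ec2.DescribeAvailabilityZonesOutput{}, nil
}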
discovery/aws/ec2_test.go (new file, 434 lines)
@@ -0,0 +1,434 @@
// Copyright 2024 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aws

import (
	"context"
	"errors"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)

// Helper functions to get pointers on literals.
// NOTE: this is common between a few tests. In the future it might be worth
// moving this out into a separate package.
func strptr(str string) *string {
	return &str
}

func boolptr(b bool) *bool {
	return &b
}

func int64ptr(i int64) *int64 {
	return &i
}

// Struct for test data.
type ec2DataStore struct {
	region string

	azToAZID map[string]string

	ownerID string

	instances []*ec2.Instance
}

// The tests themselves.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}

func TestEC2DiscoveryRefreshAZIDs(t *testing.T) {
	ctx := context.Background()

	// iterate through the test cases
	for _, tt := range []struct {
		name       string
		shouldFail bool
		ec2Data    *ec2DataStore
	}{
		{
			name:       "Normal",
			shouldFail: false,
			ec2Data: &ec2DataStore{
				azToAZID: map[string]string{
					"azname-a": "azid-1",
					"azname-b": "azid-2",
					"azname-c": "azid-3",
				},
			},
		},
		{
			name:       "HandleError",
			shouldFail: true,
			ec2Data:    &ec2DataStore{},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			client := newMockEC2Client(tt.ec2Data)

			d := &EC2Discovery{
				ec2: client,
			}

			err := d.refreshAZIDs(ctx)
			if tt.shouldFail {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, client.ec2Data.azToAZID, d.azToAZID)
			}
		})
	}
}

func TestEC2DiscoveryRefresh(t *testing.T) {
	ctx := context.Background()

	// iterate through the test cases
	for _, tt := range []struct {
		name     string
		ec2Data  *ec2DataStore
		expected []*targetgroup.Group
	}{
		{
			name: "NoPrivateIp",
			ec2Data: &ec2DataStore{
				region: "region-noprivateip",
				azToAZID: map[string]string{
					"azname-a": "azid-1",
					"azname-b": "azid-2",
					"azname-c": "azid-3",
				},
				instances: []*ec2.Instance{
					{
						InstanceId: strptr("instance-id-noprivateip"),
					},
				},
			},
			expected: []*targetgroup.Group{
				{
					Source: "region-noprivateip",
				},
			},
		},
		{
			name: "NoVpc",
			ec2Data: &ec2DataStore{
				region: "region-novpc",
				azToAZID: map[string]string{
					"azname-a": "azid-1",
					"azname-b": "azid-2",
					"azname-c": "azid-3",
				},
				ownerID: "owner-id-novpc",
				instances: []*ec2.Instance{
					{
						// set every possible option and test them here
						Architecture:      strptr("architecture-novpc"),
						ImageId:           strptr("ami-novpc"),
						InstanceId:        strptr("instance-id-novpc"),
						InstanceLifecycle: strptr("instance-lifecycle-novpc"),
						InstanceType:      strptr("instance-type-novpc"),
						Placement:         &ec2.Placement{AvailabilityZone: strptr("azname-b")},
						Platform:          strptr("platform-novpc"),
						PrivateDnsName:    strptr("private-dns-novpc"),
						PrivateIpAddress:  strptr("1.2.3.4"),
						PublicDnsName:     strptr("public-dns-novpc"),
						PublicIpAddress:   strptr("42.42.42.2"),
						State:             &ec2.InstanceState{Name: strptr("running")},
						// test tags once and for all
						Tags: []*ec2.Tag{
							{Key: strptr("tag-1-key"), Value: strptr("tag-1-value")},
							{Key: strptr("tag-2-key"), Value: strptr("tag-2-value")},
							nil,
							{Value: strptr("tag-4-value")},
							{Key: strptr("tag-5-key")},
						},
					},
				},
			},
			expected: []*targetgroup.Group{
				{
					Source: "region-novpc",
					Targets: []model.LabelSet{
						{
							"__address__":                     model.LabelValue("1.2.3.4:4242"),
							"__meta_ec2_ami":                  model.LabelValue("ami-novpc"),
							"__meta_ec2_architecture":         model.LabelValue("architecture-novpc"),
							"__meta_ec2_availability_zone":    model.LabelValue("azname-b"),
							"__meta_ec2_availability_zone_id": model.LabelValue("azid-2"),
							"__meta_ec2_instance_id":          model.LabelValue("instance-id-novpc"),
							"__meta_ec2_instance_lifecycle":   model.LabelValue("instance-lifecycle-novpc"),
							"__meta_ec2_instance_type":        model.LabelValue("instance-type-novpc"),
							"__meta_ec2_instance_state":       model.LabelValue("running"),
							"__meta_ec2_owner_id":             model.LabelValue("owner-id-novpc"),
							"__meta_ec2_platform":             model.LabelValue("platform-novpc"),
							"__meta_ec2_private_dns_name":     model.LabelValue("private-dns-novpc"),
							"__meta_ec2_private_ip":           model.LabelValue("1.2.3.4"),
							"__meta_ec2_public_dns_name":      model.LabelValue("public-dns-novpc"),
							"__meta_ec2_public_ip":            model.LabelValue("42.42.42.2"),
							"__meta_ec2_region":               model.LabelValue("region-novpc"),
							"__meta_ec2_tag_tag_1_key":        model.LabelValue("tag-1-value"),
							"__meta_ec2_tag_tag_2_key":        model.LabelValue("tag-2-value"),
						},
					},
				},
			},
		},
		{
			name: "Ipv4",
			ec2Data: &ec2DataStore{
				region: "region-ipv4",
				azToAZID: map[string]string{
					"azname-a": "azid-1",
					"azname-b": "azid-2",
					"azname-c": "azid-3",
				},
				instances: []*ec2.Instance{
					{
						// just the minimum needed for the refresh to work
						ImageId:          strptr("ami-ipv4"),
						InstanceId:       strptr("instance-id-ipv4"),
						InstanceType:     strptr("instance-type-ipv4"),
						Placement:        &ec2.Placement{AvailabilityZone: strptr("azname-c")},
						PrivateIpAddress: strptr("5.6.7.8"),
						State:            &ec2.InstanceState{Name: strptr("running")},
						SubnetId:         strptr("azid-3"),
						VpcId:            strptr("vpc-ipv4"),
						// network interfaces
						NetworkInterfaces: []*ec2.InstanceNetworkInterface{
							// interface without subnet -> should be ignored
							{
								Ipv6Addresses: []*ec2.InstanceIpv6Address{
									{
										Ipv6Address:   strptr("2001:db8:1::1"),
										IsPrimaryIpv6: boolptr(true),
									},
								},
							},
							// interface with subnet, no IPv6
							{
								Ipv6Addresses: []*ec2.InstanceIpv6Address{},
								SubnetId:      strptr("azid-3"),
							},
							// interface with another subnet, no IPv6
							{
								Ipv6Addresses: []*ec2.InstanceIpv6Address{},
								SubnetId:      strptr("azid-1"),
							},
						},
					},
				},
			},
			expected: []*targetgroup.Group{
				{
					Source: "region-ipv4",
					Targets: []model.LabelSet{
						{
							"__address__":                     model.LabelValue("5.6.7.8:4242"),
							"__meta_ec2_ami":                  model.LabelValue("ami-ipv4"),
							"__meta_ec2_availability_zone":    model.LabelValue("azname-c"),
							"__meta_ec2_availability_zone_id": model.LabelValue("azid-3"),
							"__meta_ec2_instance_id":          model.LabelValue("instance-id-ipv4"),
							"__meta_ec2_instance_state":       model.LabelValue("running"),
							"__meta_ec2_instance_type":        model.LabelValue("instance-type-ipv4"),
							"__meta_ec2_owner_id":             model.LabelValue(""),
							"__meta_ec2_primary_subnet_id":    model.LabelValue("azid-3"),
							"__meta_ec2_private_ip":           model.LabelValue("5.6.7.8"),
							"__meta_ec2_region":               model.LabelValue("region-ipv4"),
							"__meta_ec2_subnet_id":            model.LabelValue(",azid-3,azid-1,"),
							"__meta_ec2_vpc_id":               model.LabelValue("vpc-ipv4"),
						},
					},
				},
			},
		},
		{
			name: "Ipv6",
			ec2Data: &ec2DataStore{
				region: "region-ipv6",
				azToAZID: map[string]string{
					"azname-a": "azid-1",
					"azname-b": "azid-2",
					"azname-c": "azid-3",
				},
				instances: []*ec2.Instance{
					{
						// just the minimum needed for the refresh to work
						ImageId:          strptr("ami-ipv6"),
						InstanceId:       strptr("instance-id-ipv6"),
						InstanceType:     strptr("instance-type-ipv6"),
						Placement:        &ec2.Placement{AvailabilityZone: strptr("azname-b")},
						PrivateIpAddress: strptr("9.10.11.12"),
						State:            &ec2.InstanceState{Name: strptr("running")},
						SubnetId:         strptr("azid-2"),
						VpcId:            strptr("vpc-ipv6"),
						// network interfaces
						NetworkInterfaces: []*ec2.InstanceNetworkInterface{
							// interface without primary IPv6, index 2
							{
								Attachment: &ec2.InstanceNetworkInterfaceAttachment{
									DeviceIndex: int64ptr(3),
								},
								Ipv6Addresses: []*ec2.InstanceIpv6Address{
									{
										Ipv6Address:   strptr("2001:db8:2::1:1"),
										IsPrimaryIpv6: boolptr(false),
									},
								},
								SubnetId: strptr("azid-2"),
							},
							// interface with primary IPv6, index 1
							{
								Attachment: &ec2.InstanceNetworkInterfaceAttachment{
									DeviceIndex: int64ptr(1),
								},
								Ipv6Addresses: []*ec2.InstanceIpv6Address{
									{
										Ipv6Address:   strptr("2001:db8:2::2:1"),
										IsPrimaryIpv6: boolptr(false),
									},
									{
										Ipv6Address:   strptr("2001:db8:2::2:2"),
										IsPrimaryIpv6: boolptr(true),
									},
								},
								SubnetId: strptr("azid-2"),
							},
							// interface with primary IPv6, index 3
							{
								Attachment: &ec2.InstanceNetworkInterfaceAttachment{
									DeviceIndex: int64ptr(3),
								},
								Ipv6Addresses: []*ec2.InstanceIpv6Address{
									{
										Ipv6Address:   strptr("2001:db8:2::3:1"),
										IsPrimaryIpv6: boolptr(true),
									},
								},
								SubnetId: strptr("azid-1"),
							},
							// interface without primary IPv6, index 0
							{
								Attachment: &ec2.InstanceNetworkInterfaceAttachment{
									DeviceIndex: int64ptr(0),
								},
								Ipv6Addresses: []*ec2.InstanceIpv6Address{},
								SubnetId:      strptr("azid-3"),
							},
						},
					},
				},
			},
			expected: []*targetgroup.Group{
				{
					Source: "region-ipv6",
					Targets: []model.LabelSet{
						{
							"__address__":                       model.LabelValue("9.10.11.12:4242"),
							"__meta_ec2_ami":                    model.LabelValue("ami-ipv6"),
							"__meta_ec2_availability_zone":      model.LabelValue("azname-b"),
							"__meta_ec2_availability_zone_id":   model.LabelValue("azid-2"),
							"__meta_ec2_instance_id":            model.LabelValue("instance-id-ipv6"),
							"__meta_ec2_instance_state":         model.LabelValue("running"),
							"__meta_ec2_instance_type":          model.LabelValue("instance-type-ipv6"),
							"__meta_ec2_ipv6_addresses":         model.LabelValue(",2001:db8:2::1:1,2001:db8:2::2:1,2001:db8:2::2:2,2001:db8:2::3:1,"),
							"__meta_ec2_owner_id":               model.LabelValue(""),
							"__meta_ec2_primary_ipv6_addresses": model.LabelValue(",,2001:db8:2::2:2,,2001:db8:2::3:1,"),
							"__meta_ec2_primary_subnet_id":      model.LabelValue("azid-2"),
							"__meta_ec2_private_ip":             model.LabelValue("9.10.11.12"),
							"__meta_ec2_region":                 model.LabelValue("region-ipv6"),
							"__meta_ec2_subnet_id":              model.LabelValue(",azid-2,azid-1,azid-3,"),
							"__meta_ec2_vpc_id":                 model.LabelValue("vpc-ipv6"),
						},
					},
				},
			},
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			client := newMockEC2Client(tt.ec2Data)

			d := &EC2Discovery{
				ec2: client,
				cfg: &EC2SDConfig{
					Port:   4242,
					Region: client.ec2Data.region,
				},
			}

			g, err := d.refresh(ctx)
			require.NoError(t, err)
			require.Equal(t, tt.expected, g)
		})
	}
}

// EC2 client mock.
type mockEC2Client struct {
	ec2iface.EC2API
	ec2Data ec2DataStore
}

func newMockEC2Client(ec2Data *ec2DataStore) *mockEC2Client {
	client := mockEC2Client{
		ec2Data: *ec2Data,
	}
	return &client
}

func (m *mockEC2Client) DescribeAvailabilityZonesWithContext(ctx aws.Context, input *ec2.DescribeAvailabilityZonesInput, opts ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error) {
	if len(m.ec2Data.azToAZID) == 0 {
		return nil, errors.New("No AZs found")
	}

	azs := make([]*ec2.AvailabilityZone, len(m.ec2Data.azToAZID))

	i := 0
	for k, v := range m.ec2Data.azToAZID {
		azs[i] = &ec2.AvailabilityZone{
			ZoneName: strptr(k),
			ZoneId:   strptr(v),
		}
		i++
	}

	return &ec2.DescribeAvailabilityZonesOutput{
		AvailabilityZones: azs,
	}, nil
}

func (m *mockEC2Client) DescribeInstancesPagesWithContext(ctx aws.Context, input *ec2.DescribeInstancesInput, fn func(*ec2.DescribeInstancesOutput, bool) bool, opts ...request.Option) error {
	r := ec2.Reservation{}
	r.SetInstances(m.ec2Data.instances)
	r.SetOwnerId(m.ec2Data.ownerID)

	o := ec2.DescribeInstancesOutput{}
	o.SetReservations([]*ec2.Reservation{&r})

	_ = fn(&o, true)

	return nil
}
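The expected label values above show the convention for list-valued EC2 meta labels: items joined with commas and wrapped in a leading and trailing comma. A small consumption sketch; note that __meta_ec2_primary_ipv6_addresses appears to be positional by attachment device index, judging from the fixtures, so its empty slots must survive:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Unordered lists such as __meta_ec2_subnet_id can simply be trimmed and split.
	subnets := ",azid-2,azid-1,azid-3,"
	fmt.Println(strings.Split(strings.Trim(subnets, ","), ",")) // [azid-2 azid-1 azid-3]

	// Positional lists keep empty slots, so drop only the outer wrapper commas.
	primaries := ",,2001:db8:2::2:2,,2001:db8:2::3:1,"
	parts := strings.Split(primaries, ",")
	fmt.Printf("%q\n", parts[1:len(parts)-1]) // ["" "2001:db8:2::2:2" "" "2001:db8:2::3:1"]
}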
@@ -134,7 +134,7 @@ type LightsailDiscovery struct {
func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
	m, ok := metrics.(*lightsailMetrics)
	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
	}

	if logger == nil {
@@ -70,18 +70,14 @@ const (
	authMethodManagedIdentity = "ManagedIdentity"
)

-var (
-	userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
-
-	// DefaultSDConfig is the default Azure SD configuration.
-	DefaultSDConfig = SDConfig{
-		Port:                 80,
-		RefreshInterval:      model.Duration(5 * time.Minute),
-		Environment:          "AzurePublicCloud",
-		AuthenticationMethod: authMethodOAuth,
-		HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
-	}
-)
+// DefaultSDConfig is the default Azure SD configuration.
+var DefaultSDConfig = SDConfig{
+	Port:                 80,
+	RefreshInterval:      model.Duration(5 * time.Minute),
+	Environment:          "AzurePublicCloud",
+	AuthenticationMethod: authMethodOAuth,
+	HTTPClientConfig:     config_util.DefaultHTTPClientConfig,
+}

var environments = map[string]cloud.Configuration{
	"AZURECHINACLOUD": cloud.AzureChina,

@@ -186,7 +182,7 @@ type Discovery struct {
func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*azureMetrics)
	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
	}

	if logger == nil {

@@ -233,21 +229,21 @@ type azureClient struct {

var _ client = &azureClient{}

-// createAzureClient is a helper function for creating an Azure compute client to ARM.
-func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) {
-	cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
+// createAzureClient is a helper method for creating an Azure compute client to ARM.
+func (d *Discovery) createAzureClient() (client, error) {
+	cloudConfiguration, err := CloudConfigurationFromName(d.cfg.Environment)
	if err != nil {
		return &azureClient{}, err
	}

	var c azureClient
-	c.logger = logger
+	c.logger = d.logger

	telemetry := policy.TelemetryOptions{
-		ApplicationID: userAgent,
+		ApplicationID: version.PrometheusUserAgent(),
	}

-	credential, err := newCredential(cfg, policy.ClientOptions{
+	credential, err := newCredential(*d.cfg, policy.ClientOptions{
		Cloud:     cloudConfiguration,
		Telemetry: telemetry,
	})

@@ -255,7 +251,7 @@ func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) {
		return &azureClient{}, err
	}

-	client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "azure_sd")
+	client, err := config_util.NewClientFromConfig(d.cfg.HTTPClientConfig, "azure_sd")
	if err != nil {
		return &azureClient{}, err
	}

@@ -267,22 +263,22 @@ func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) {
		},
	}

-	c.vm, err = armcompute.NewVirtualMachinesClient(cfg.SubscriptionID, credential, options)
+	c.vm, err = armcompute.NewVirtualMachinesClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

-	c.nic, err = armnetwork.NewInterfacesClient(cfg.SubscriptionID, credential, options)
+	c.nic, err = armnetwork.NewInterfacesClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

-	c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(cfg.SubscriptionID, credential, options)
+	c.vmss, err = armcompute.NewVirtualMachineScaleSetsClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

-	c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(cfg.SubscriptionID, credential, options)
+	c.vmssvm, err = armcompute.NewVirtualMachineScaleSetVMsClient(d.cfg.SubscriptionID, credential, options)
	if err != nil {
		return &azureClient{}, err
	}

@@ -350,15 +346,7 @@ func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, er
	return resourceID, nil
}

-func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
-	defer d.logger.Debug("Azure discovery completed")
-
-	client, err := createAzureClient(*d.cfg, d.logger)
-	if err != nil {
-		d.metrics.failuresCount.Inc()
-		return nil, fmt.Errorf("could not create Azure client: %w", err)
-	}
-
+func (d *Discovery) refreshAzureClient(ctx context.Context, client client) ([]*targetgroup.Group, error) {
	machines, err := client.getVMs(ctx, d.cfg.ResourceGroup)
	if err != nil {
		d.metrics.failuresCount.Inc()

@@ -418,6 +406,18 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	return []*targetgroup.Group{&tg}, nil
}

+func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
+	defer d.logger.Debug("Azure discovery completed")
+
+	client, err := d.createAzureClient()
+	if err != nil {
+		d.metrics.failuresCount.Inc()
+		return nil, fmt.Errorf("could not create Azure client: %w", err)
+	}
+
+	return d.refreshAzureClient(ctx, client)
+}
+
func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualMachine) (model.LabelSet, error) {
	r, err := newAzureResourceFromID(vm.ID, d.logger)
	if err != nil {

@@ -458,11 +458,10 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
		networkInterface, err = client.getVMScaleSetVMNetworkInterfaceByID(ctx, nicID, vm.ScaleSet, vm.InstanceID)
	}
	if err != nil {
-		if errors.Is(err, errorNotFound) {
-			d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
-		} else {
+		if !errors.Is(err, errorNotFound) {
			return nil, err
		}
+		d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
		// Get out of this routine because we cannot continue without a network interface.
		return nil, nil
	}
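Moving client construction into createAzureClient and having refreshAzureClient take the client as a parameter is a dependency-injection refactor: the network-facing half can now be handed a fake. A generic, self-contained sketch of the pattern; the type and function names here are illustrative, not the Azure package's own:

package main

import "fmt"

// client is the seam: production code builds the real implementation,
// tests inject a fake.
type client interface {
	getVMs() ([]string, error)
}

type realClient struct{}

func (realClient) getVMs() ([]string, error) { return []string{"vm1"}, nil }

type fakeClient struct{ vms []string }

func (f fakeClient) getVMs() ([]string, error) { return f.vms, nil }

// refreshWith holds the logic worth testing; refresh only wires in the real client.
func refreshWith(c client) ([]string, error) { return c.getVMs() }

func refresh() ([]string, error) { return refreshWith(realClient{}) }

func main() {
	got, _ := refreshWith(fakeClient{vms: []string{"test-vm"}}) // test path
	fmt.Println(got)
	prod, _ := refresh() // production path
	fmt.Println(prod)
}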
@@ -15,19 +15,34 @@ package azure

import (
	"context"
	"fmt"
	"log/slog"
	"net/http"
	"slices"
	"strings"
	"testing"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
	azfake "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
	fake "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
	fakenetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4/fake"
	cache "github.com/Code-Hex/go-generics-cache"
	"github.com/Code-Hex/go-generics-cache/policy/lru"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"
	"go.uber.org/goleak"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/targetgroup"
)

+const defaultMockNetworkID string = "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"
+
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		goleak.IgnoreTopFunction("github.com/Code-Hex/go-generics-cache.(*janitor).run.func1"),

@@ -96,13 +111,12 @@ func TestVMToLabelSet(t *testing.T) {
	vmType := "type"
	location := "westeurope"
	computerName := "computer_name"
-	networkID := "/subscriptions/00000000-0000-0000-0000-000000000000/network1"
	ipAddress := "10.20.30.40"
	primary := true
	networkProfile := armcompute.NetworkProfile{
		NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
			{
-				ID:         &networkID,
+				ID:         to.Ptr(defaultMockNetworkID),
				Properties: &armcompute.NetworkInterfaceReferenceProperties{Primary: &primary},
			},
		},

@@ -139,7 +153,7 @@ func TestVMToLabelSet(t *testing.T) {
		Location:          location,
		OsType:            "Linux",
		Tags:              map[string]*string{},
-		NetworkInterfaces: []string{networkID},
+		NetworkInterfaces: []string{defaultMockNetworkID},
		Size:              size,
	}

@@ -154,7 +168,8 @@ func TestVMToLabelSet(t *testing.T) {
		cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
	}
	network := armnetwork.Interface{
-		Name: &networkID,
+		Name: to.Ptr(defaultMockNetworkID),
+		ID:   to.Ptr(defaultMockNetworkID),
		Properties: &armnetwork.InterfacePropertiesFormat{
			Primary: &primary,
			IPConfigurations: []*armnetwork.InterfaceIPConfiguration{

@@ -164,9 +179,9 @@ func TestVMToLabelSet(t *testing.T) {
			},
		},
	}
-	client := &mockAzureClient{
-		networkInterface: &network,
-	}
+
+	client := createMockAzureClient(t, nil, nil, nil, network, nil)

	labelSet, err := d.vmToLabelSet(context.Background(), client, actualVM)
	require.NoError(t, err)
	require.Len(t, labelSet, 11)

@@ -475,34 +490,372 @@ func TestNewAzureResourceFromID(t *testing.T) {
	}
}

func TestAzureRefresh(t *testing.T) {
	tests := []struct {
		scenario       string
		vmResp         []armcompute.VirtualMachinesClientListAllResponse
		vmssResp       []armcompute.VirtualMachineScaleSetsClientListAllResponse
		vmssvmResp     []armcompute.VirtualMachineScaleSetVMsClientListResponse
		interfacesResp armnetwork.Interface
		expectedTG     []*targetgroup.Group
	}{
		{
			scenario: "VMs, VMSS and VMSSVMs in Multiple Responses",
			vmResp: []armcompute.VirtualMachinesClientListAllResponse{
				{
					VirtualMachineListResult: armcompute.VirtualMachineListResult{
						Value: []*armcompute.VirtualMachine{
							defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1"), to.Ptr("vm1")),
							defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2"), to.Ptr("vm2")),
						},
					},
				},
				{
					VirtualMachineListResult: armcompute.VirtualMachineListResult{
						Value: []*armcompute.VirtualMachine{
							defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3"), to.Ptr("vm3")),
							defaultVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4"), to.Ptr("vm4")),
						},
					},
				},
			},
			vmssResp: []armcompute.VirtualMachineScaleSetsClientListAllResponse{
				{
					VirtualMachineScaleSetListWithLinkResult: armcompute.VirtualMachineScaleSetListWithLinkResult{
						Value: []*armcompute.VirtualMachineScaleSet{
							{
								ID:       to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1"),
								Name:     to.Ptr("vmScaleSet1"),
								Location: to.Ptr("australiaeast"),
								Type:     to.Ptr("Microsoft.Compute/virtualMachineScaleSets"),
							},
						},
					},
				},
			},
			vmssvmResp: []armcompute.VirtualMachineScaleSetVMsClientListResponse{
				{
					VirtualMachineScaleSetVMListResult: armcompute.VirtualMachineScaleSetVMListResult{
						Value: []*armcompute.VirtualMachineScaleSetVM{
							defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1"), to.Ptr("vmScaleSet1_vm1")),
							defaultVMSSVMWithIDAndName(to.Ptr("/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2"), to.Ptr("vmScaleSet1_vm2")),
						},
					},
				},
			},
			interfacesResp: armnetwork.Interface{
				ID: to.Ptr(defaultMockNetworkID),
				Properties: &armnetwork.InterfacePropertiesFormat{
					Primary: to.Ptr(true),
					IPConfigurations: []*armnetwork.InterfaceIPConfiguration{
						{Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{
							PrivateIPAddress: to.Ptr("10.0.0.1"),
						}},
					},
				},
			},
			expectedTG: []*targetgroup.Group{
				{
					Targets: []model.LabelSet{
						{
							"__address__":                          "10.0.0.1:80",
							"__meta_azure_machine_computer_name":   "computer_name",
							"__meta_azure_machine_id":              "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm1",
							"__meta_azure_machine_location":        "australiaeast",
							"__meta_azure_machine_name":            "vm1",
							"__meta_azure_machine_os_type":         "Linux",
							"__meta_azure_machine_private_ip":      "10.0.0.1",
							"__meta_azure_machine_resource_group":  "{resourceGroup}",
							"__meta_azure_machine_size":            "size",
							"__meta_azure_machine_tag_prometheus":  "",
							"__meta_azure_subscription_id":         "",
							"__meta_azure_tenant_id":               "",
						},
						{
							"__address__":                          "10.0.0.1:80",
							"__meta_azure_machine_computer_name":   "computer_name",
							"__meta_azure_machine_id":              "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm2",
							"__meta_azure_machine_location":        "australiaeast",
							"__meta_azure_machine_name":            "vm2",
							"__meta_azure_machine_os_type":         "Linux",
							"__meta_azure_machine_private_ip":      "10.0.0.1",
							"__meta_azure_machine_resource_group":  "{resourceGroup}",
							"__meta_azure_machine_size":            "size",
							"__meta_azure_machine_tag_prometheus":  "",
							"__meta_azure_subscription_id":         "",
							"__meta_azure_tenant_id":               "",
						},
						{
							"__address__":                          "10.0.0.1:80",
							"__meta_azure_machine_computer_name":   "computer_name",
							"__meta_azure_machine_id":              "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm3",
							"__meta_azure_machine_location":        "australiaeast",
							"__meta_azure_machine_name":            "vm3",
							"__meta_azure_machine_os_type":         "Linux",
							"__meta_azure_machine_private_ip":      "10.0.0.1",
							"__meta_azure_machine_resource_group":  "{resourceGroup}",
							"__meta_azure_machine_size":            "size",
							"__meta_azure_machine_tag_prometheus":  "",
							"__meta_azure_subscription_id":         "",
							"__meta_azure_tenant_id":               "",
						},
						{
							"__address__":                          "10.0.0.1:80",
							"__meta_azure_machine_computer_name":   "computer_name",
							"__meta_azure_machine_id":              "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/vm4",
							"__meta_azure_machine_location":        "australiaeast",
							"__meta_azure_machine_name":            "vm4",
							"__meta_azure_machine_os_type":         "Linux",
							"__meta_azure_machine_private_ip":      "10.0.0.1",
							"__meta_azure_machine_resource_group":  "{resourceGroup}",
							"__meta_azure_machine_size":            "size",
							"__meta_azure_machine_tag_prometheus":  "",
							"__meta_azure_subscription_id":         "",
							"__meta_azure_tenant_id":               "",
						},
						{
							"__address__":                          "10.0.0.1:80",
							"__meta_azure_machine_computer_name":   "computer_name",
							"__meta_azure_machine_id":              "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm1",
							"__meta_azure_machine_location":        "australiaeast",
							"__meta_azure_machine_name":            "vmScaleSet1_vm1",
							"__meta_azure_machine_os_type":         "Linux",
							"__meta_azure_machine_private_ip":      "10.0.0.1",
							"__meta_azure_machine_resource_group":  "{resourceGroup}",
							"__meta_azure_machine_scale_set":       "vmScaleSet1",
							"__meta_azure_machine_size":            "size",
							"__meta_azure_machine_tag_prometheus":  "",
							"__meta_azure_subscription_id":         "",
							"__meta_azure_tenant_id":               "",
						},
						{
							"__address__":                          "10.0.0.1:80",
							"__meta_azure_machine_computer_name":   "computer_name",
							"__meta_azure_machine_id":              "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/vmScaleSet1/virtualMachines/vmScaleSet1_vm2",
							"__meta_azure_machine_location":        "australiaeast",
							"__meta_azure_machine_name":            "vmScaleSet1_vm2",
							"__meta_azure_machine_os_type":         "Linux",
							"__meta_azure_machine_private_ip":      "10.0.0.1",
							"__meta_azure_machine_resource_group":  "{resourceGroup}",
							"__meta_azure_machine_scale_set":       "vmScaleSet1",
							"__meta_azure_machine_size":            "size",
							"__meta_azure_machine_tag_prometheus":  "",
							"__meta_azure_subscription_id":         "",
							"__meta_azure_tenant_id":               "",
						},
					},
				},
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.scenario, func(t *testing.T) {
			t.Parallel()
			azureSDConfig := &DefaultSDConfig

			azureClient := createMockAzureClient(t, tc.vmResp, tc.vmssResp, tc.vmssvmResp, tc.interfacesResp, nil)

			reg := prometheus.NewRegistry()
			refreshMetrics := discovery.NewRefreshMetrics(reg)
			metrics := azureSDConfig.NewDiscovererMetrics(reg, refreshMetrics)

			sd, err := NewDiscovery(azureSDConfig, nil, metrics)
			require.NoError(t, err)

			tg, err := sd.refreshAzureClient(context.Background(), azureClient)
			require.NoError(t, err)

			sortTargetsByID(tg[0].Targets)
			require.Equal(t, tc.expectedTG, tg)
		})
	}
}

type mockAzureClient struct {
-	networkInterface *armnetwork.Interface
+	azureClient
}

var _ client = &mockAzureClient{}

+func createMockAzureClient(t *testing.T, vmResp []armcompute.VirtualMachinesClientListAllResponse, vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse, vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse, interfaceResp armnetwork.Interface, logger *slog.Logger) client {
+	t.Helper()
+	mockVMServer := defaultMockVMServer(vmResp)
+	mockVMSSServer := defaultMockVMSSServer(vmssResp)
+	mockVMScaleSetVMServer := defaultMockVMSSVMServer(vmssvmResp)
+	mockInterfaceServer := defaultMockInterfaceServer(interfaceResp)

-func (*mockAzureClient) getVMs(ctx context.Context, resourceGroup string) ([]virtualMachine, error) {
-	return nil, nil
-}
+	vmClient, err := armcompute.NewVirtualMachinesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
+		ClientOptions: azcore.ClientOptions{
+			Transport: fake.NewVirtualMachinesServerTransport(&mockVMServer),
+		},
+	})
+	require.NoError(t, err)

-func (*mockAzureClient) getScaleSets(ctx context.Context, resourceGroup string) ([]armcompute.VirtualMachineScaleSet, error) {
-	return nil, nil
-}
+	vmssClient, err := armcompute.NewVirtualMachineScaleSetsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
+		ClientOptions: azcore.ClientOptions{
+			Transport: fake.NewVirtualMachineScaleSetsServerTransport(&mockVMSSServer),
+		},
+	})
+	require.NoError(t, err)

-func (*mockAzureClient) getScaleSetVMs(ctx context.Context, scaleSet armcompute.VirtualMachineScaleSet) ([]virtualMachine, error) {
-	return nil, nil
-}
+	vmssvmClient, err := armcompute.NewVirtualMachineScaleSetVMsClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
+		ClientOptions: azcore.ClientOptions{
+			Transport: fake.NewVirtualMachineScaleSetVMsServerTransport(&mockVMScaleSetVMServer),
+		},
+	})
+	require.NoError(t, err)

-func (m *mockAzureClient) getVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID string) (*armnetwork.Interface, error) {
-	if networkInterfaceID == "" {
-		return nil, fmt.Errorf("parameter networkInterfaceID cannot be empty")
-	}
-	return m.networkInterface, nil
-}
+	interfacesClient, err := armnetwork.NewInterfacesClient("fake-subscription-id", &azfake.TokenCredential{}, &arm.ClientOptions{
+		ClientOptions: azcore.ClientOptions{
+			Transport: fakenetwork.NewInterfacesServerTransport(&mockInterfaceServer),
+		},
+	})
+	require.NoError(t, err)
+
+	return &mockAzureClient{
+		azureClient: azureClient{
+			vm:     vmClient,
+			vmss:   vmssClient,
+			vmssvm: vmssvmClient,
+			nic:    interfacesClient,
+			logger: logger,
+		},
+	}
+}

-func (m *mockAzureClient) getVMScaleSetVMNetworkInterfaceByID(ctx context.Context, networkInterfaceID, scaleSetName, instanceID string) (*armnetwork.Interface, error) {
-	if scaleSetName == "" {
-		return nil, fmt.Errorf("parameter virtualMachineScaleSetName cannot be empty")
-	}
-	return m.networkInterface, nil
-}
+func defaultMockInterfaceServer(interfaceResp armnetwork.Interface) fakenetwork.InterfacesServer {
+	return fakenetwork.InterfacesServer{
+		Get: func(ctx context.Context, resourceGroupName, networkInterfaceName string, options *armnetwork.InterfacesClientGetOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetResponse], errResp azfake.ErrorResponder) {
+			resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetResponse{Interface: interfaceResp}, nil)
+			return
+		},
+		GetVirtualMachineScaleSetNetworkInterface: func(ctx context.Context, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName string, options *armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceOptions) (resp azfake.Responder[armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse], errResp azfake.ErrorResponder) {
+			resp.SetResponse(http.StatusOK, armnetwork.InterfacesClientGetVirtualMachineScaleSetNetworkInterfaceResponse{Interface: interfaceResp}, nil)
+			return
+		},
+	}
+}

func defaultMockVMServer(vmResp []armcompute.VirtualMachinesClientListAllResponse) fake.VirtualMachinesServer {
	return fake.VirtualMachinesServer{
		NewListAllPager: func(options *armcompute.VirtualMachinesClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachinesClientListAllResponse]) {
			for _, page := range vmResp {
				resp.AddPage(http.StatusOK, page, nil)
			}
			return
		},
	}
}

func defaultMockVMSSServer(vmssResp []armcompute.VirtualMachineScaleSetsClientListAllResponse) fake.VirtualMachineScaleSetsServer {
	return fake.VirtualMachineScaleSetsServer{
		NewListAllPager: func(options *armcompute.VirtualMachineScaleSetsClientListAllOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetsClientListAllResponse]) {
			for _, page := range vmssResp {
				resp.AddPage(http.StatusOK, page, nil)
			}
			return
		},
	}
}

func defaultMockVMSSVMServer(vmssvmResp []armcompute.VirtualMachineScaleSetVMsClientListResponse) fake.VirtualMachineScaleSetVMsServer {
	return fake.VirtualMachineScaleSetVMsServer{
		NewListPager: func(resourceGroupName, virtualMachineScaleSetName string, options *armcompute.VirtualMachineScaleSetVMsClientListOptions) (resp azfake.PagerResponder[armcompute.VirtualMachineScaleSetVMsClientListResponse]) {
			for _, page := range vmssvmResp {
				resp.AddPage(http.StatusOK, page, nil)
			}
			return
		},
	}
}

func defaultVMWithIDAndName(id, name *string) *armcompute.VirtualMachine {
	vmSize := armcompute.VirtualMachineSizeTypes("size")
	osType := armcompute.OperatingSystemTypesLinux
	defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachine/testVM"
	defaultName := "testVM"

	if id == nil {
		id = &defaultID
	}
	if name == nil {
		name = &defaultName
	}

	return &armcompute.VirtualMachine{
		ID:       id,
		Name:     name,
		Type:     to.Ptr("Microsoft.Compute/virtualMachines"),
		Location: to.Ptr("australiaeast"),
		Properties: &armcompute.VirtualMachineProperties{
			OSProfile: &armcompute.OSProfile{
				ComputerName: to.Ptr("computer_name"),
			},
			StorageProfile: &armcompute.StorageProfile{
				OSDisk: &armcompute.OSDisk{
					OSType: &osType,
				},
			},
			NetworkProfile: &armcompute.NetworkProfile{
				NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
					{
						ID: to.Ptr(defaultMockNetworkID),
					},
				},
			},
			HardwareProfile: &armcompute.HardwareProfile{
				VMSize: &vmSize,
			},
		},
		Tags: map[string]*string{
			"prometheus": new(string),
		},
	}
}

func defaultVMSSVMWithIDAndName(id, name *string) *armcompute.VirtualMachineScaleSetVM {
	vmSize := armcompute.VirtualMachineSizeTypes("size")
	osType := armcompute.OperatingSystemTypesLinux
	defaultID := "/subscriptions/00000000-0000-0000-0000-00000000000/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/virtualMachineScaleSets/testVMScaleSet/virtualMachines/testVM"
	defaultName := "testVM"

	if id == nil {
		id = &defaultID
	}
	if name == nil {
		name = &defaultName
	}

	return &armcompute.VirtualMachineScaleSetVM{
		ID:         id,
		Name:       name,
		Type:       to.Ptr("Microsoft.Compute/virtualMachines"),
		InstanceID: to.Ptr("123"),
		Location:   to.Ptr("australiaeast"),
		Properties: &armcompute.VirtualMachineScaleSetVMProperties{
			OSProfile: &armcompute.OSProfile{
				ComputerName: to.Ptr("computer_name"),
			},
			StorageProfile: &armcompute.StorageProfile{
				OSDisk: &armcompute.OSDisk{
					OSType: &osType,
				},
			},
			NetworkProfile: &armcompute.NetworkProfile{
				NetworkInterfaces: []*armcompute.NetworkInterfaceReference{
					{ID: to.Ptr(defaultMockNetworkID)},
				},
			},
			HardwareProfile: &armcompute.HardwareProfile{
				VMSize: &vmSize,
			},
		},
		Tags: map[string]*string{
			"prometheus": new(string),
		},
	}
}

func sortTargetsByID(targets []model.LabelSet) {
	slices.SortFunc(targets, func(a, b model.LabelSet) int {
		return strings.Compare(string(a["__meta_azure_machine_id"]), string(b["__meta_azure_machine_id"]))
	})
}
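sortTargetsByID above leans on slices.SortFunc, whose comparator must return negative, zero, or positive, exactly the contract strings.Compare satisfies. A tiny standalone illustration of that contract:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	ids := []string{"vm2", "vm10", "vm1"}
	// The comparator returns <0 if a sorts before b, 0 if equal, >0 otherwise.
	slices.SortFunc(ids, func(a, b string) int { return strings.Compare(a, b) })
	fmt.Println(ids) // [vm1 vm10 vm2]: lexicographic, not numeric
}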
@@ -19,6 +19,7 @@ import (
	"fmt"
	"log/slog"
	"net"
+	"slices"
	"strconv"
	"strings"
	"time"

@@ -189,7 +190,7 @@ type Discovery struct {
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*consulMetrics)
	if !ok {
-		return nil, fmt.Errorf("invalid discovery metrics type")
+		return nil, errors.New("invalid discovery metrics type")
	}

	if logger == nil {

@@ -241,22 +242,17 @@ func (d *Discovery) shouldWatch(name string, tags []string) bool {
	return d.shouldWatchFromName(name) && d.shouldWatchFromTags(tags)
}

-// shouldWatch returns whether the service of the given name should be watched based on its name.
+// shouldWatchFromName returns whether the service of the given name should be watched based on its name.
func (d *Discovery) shouldWatchFromName(name string) bool {
	// If there's no fixed set of watched services, we watch everything.
	if len(d.watchedServices) == 0 {
		return true
	}

-	for _, sn := range d.watchedServices {
-		if sn == name {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(d.watchedServices, name)
}

-// shouldWatch returns whether the service of the given name should be watched based on its tags.
+// shouldWatchFromTags returns whether the service of the given name should be watched based on its tags.
// This gets called when the user doesn't specify a list of services in order to avoid watching
// *all* services. Details in https://github.com/prometheus/prometheus/pull/3814
func (d *Discovery) shouldWatchFromTags(tags []string) bool {
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
|
@ -114,7 +115,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*digitaloceanMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
@ -131,7 +132,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove
|
|||
Transport: rt,
|
||||
Timeout: time.Duration(conf.RefreshInterval),
|
||||
},
|
||||
godo.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version)),
|
||||
godo.SetUserAgent(version.PrometheusUserAgent()),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error setting up digital ocean agent: %w", err)
|
||||
|
|
|
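Several files in this commit converge on version.PrometheusUserAgent() instead of hand-formatting the user agent. A sketch of the call, assuming it keeps the same "Prometheus/<version>" shape as the fmt.Sprintf it replaces:

package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// One helper instead of fmt.Sprintf("Prometheus/%s", version.Version)
	// repeated across every SD package.
	fmt.Println(version.PrometheusUserAgent())
}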
@ -121,7 +121,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*dnsMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -15,7 +15,7 @@ package dns
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net"
|
||||
"testing"
|
||||
|
@ -53,7 +53,7 @@ func TestDNS(t *testing.T) {
|
|||
Type: "A",
|
||||
},
|
||||
lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
|
||||
return nil, fmt.Errorf("some error")
|
||||
return nil, errors.New("some error")
|
||||
},
|
||||
expected: []*targetgroup.Group{},
|
||||
},
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"github.com/prometheus/common/version"
|
||||
)
|
||||
|
||||
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
|
||||
var userAgent = version.PrometheusUserAgent()
|
||||
|
||||
type Applications struct {
|
||||
VersionsDelta int `xml:"versions__delta"`
|
||||
|
|
|
@ -16,7 +16,6 @@ package eureka
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"net/http"
|
||||
|
@ -129,7 +128,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*eurekaMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "eureka_sd")
|
||||
|
|
|
@ -184,7 +184,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
fm, ok := metrics.(*fileMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
|
|
|
@ -132,7 +132,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*gceMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
d := &Discovery{
|
||||
|
|
|
@ -138,7 +138,7 @@ type Discovery struct {
|
|||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*hetznerMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
return nil, errors.New("invalid discovery metrics type")
|
||||
}
|
||||
|
||||
r, err := newRefresher(conf, logger)
|
||||
|
|
|
@ -39,7 +39,7 @@ const (
|
|||
hetznerLabelRobotCancelled = hetznerRobotLabelPrefix + "cancelled"
|
||||
)
|
||||
|
||||
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
|
||||
var userAgent = version.PrometheusUserAgent()
|
||||
|
||||
// Discovery periodically performs Hetzner Robot requests. It implements
|
||||
// the Discoverer interface.
|
||||
|
|
|
@ -41,10 +41,10 @@ import (
|
|||
var (
|
||||
// DefaultSDConfig is the default HTTP SD configuration.
|
||||
DefaultSDConfig = SDConfig{
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
HTTPClientConfig: config.DefaultHTTPClientConfig,
|
||||
RefreshInterval: model.Duration(60 * time.Second),
|
||||
}
|
||||
userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
|
||||
userAgent = version.PrometheusUserAgent()
|
||||
matchContentType = regexp.MustCompile(`^(?i:application\/json(;\s*charset=("utf-8"|utf-8))?)$`)
|
||||
)
|
||||
|
||||
|
@@ -86,17 +86,17 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
		return err
	}
	if c.URL == "" {
		return fmt.Errorf("URL is missing")
		return errors.New("URL is missing")
	}
	parsedURL, err := url.Parse(c.URL)
	if err != nil {
		return err
	}
	if parsedURL.Scheme != "http" && parsedURL.Scheme != "https" {
		return fmt.Errorf("URL scheme must be 'http' or 'https'")
		return errors.New("URL scheme must be 'http' or 'https'")
	}
	if parsedURL.Host == "" {
		return fmt.Errorf("host is missing in URL")
		return errors.New("host is missing in URL")
	}
	return c.HTTPClientConfig.Validate()
}
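For context, the three checks above map directly onto `net/url` parsing results. A standalone sketch of the same validation (function name is illustrative, not from the codebase):

```go
package main

import (
	"errors"
	"fmt"
	"net/url"
)

// validateSDURL mirrors the checks in SDConfig.UnmarshalYAML above:
// the URL must be non-empty, use http or https, and carry a host.
func validateSDURL(raw string) error {
	if raw == "" {
		return errors.New("URL is missing")
	}
	parsed, err := url.Parse(raw)
	if err != nil {
		return err
	}
	if parsed.Scheme != "http" && parsed.Scheme != "https" {
		return errors.New("URL scheme must be 'http' or 'https'")
	}
	if parsed.Host == "" {
		return errors.New("host is missing in URL")
	}
	return nil
}

func main() {
	fmt.Println(validateSDURL("http://sd.example.com:8080/targets")) // <nil>
	fmt.Println(validateSDURL("ftp://sd.example.com"))               // scheme error
	fmt.Println(validateSDURL("http://"))                            // host error
}
```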
@@ -118,7 +118,7 @@ type Discovery struct {

func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*httpMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	if logger == nil {
@@ -15,7 +15,6 @@ package ionos

import (
	"errors"
	"fmt"
	"log/slog"
	"time"
@@ -46,7 +45,7 @@ type Discovery struct{}

func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
	m, ok := metrics.(*ionosMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	if conf.ionosEndpoint == "" {
@@ -15,7 +15,6 @@ package ionos

import (
	"context"
	"fmt"
	"log/slog"
	"net"
	"net/http"
@@ -77,7 +76,7 @@ func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error
		Transport: rt,
		Timeout:   time.Duration(conf.RefreshInterval),
	}
	cfg.UserAgent = fmt.Sprintf("Prometheus/%s", version.Version)
	cfg.UserAgent = version.PrometheusUserAgent()

	d.client = ionoscloud.NewAPIClient(cfg)
@@ -102,10 +102,7 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
		return
	}

	ep := &apiv1.Endpoints{}
	ep.Namespace = svc.Namespace
	ep.Name = svc.Name
	obj, exists, err := e.endpointsStore.Get(ep)
	obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name))
	if exists && err == nil {
		e.enqueue(obj.(*apiv1.Endpoints))
	}
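This hunk (and the analogous ones below) drops the throwaway stub object that `Store.Get` needed and looks the object up by its cache key directly. client-go stores built with `MetaNamespaceKeyFunc` key objects as `namespace/name`, which is exactly what the new `namespacedName` helper produces. A minimal sketch of the lookup (standalone, not from the diff):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	_ = store.Add(&v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "svc1"}})

	// GetByKey takes the "namespace/name" key directly, avoiding the
	// allocation of a stub object just to derive that same key.
	obj, exists, err := store.GetByKey("ns1/svc1")
	fmt.Println(obj != nil, exists, err) // true true <nil>
}
```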
@@ -167,8 +164,11 @@ func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node
			e.enqueueNode(node.Name)
		},
		DeleteFunc: func(o interface{}) {
			node := o.(*apiv1.Node)
			e.enqueueNode(node.Name)
			nodeName, err := nodeName(o)
			if err != nil {
				l.Error("Error getting Node name", "err", err)
			}
			e.enqueueNode(nodeName)
		},
	})
	if err != nil {
@@ -454,11 +454,8 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
	if ref == nil || ref.Kind != "Pod" {
		return nil
	}
	p := &apiv1.Pod{}
	p.Namespace = ref.Namespace
	p.Name = ref.Name

	obj, exists, err := e.podStore.Get(p)
	obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
	if err != nil {
		e.logger.Error("resolving pod ref failed", "err", err)
		return nil
@@ -470,11 +467,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
}

func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
	svc := &apiv1.Service{}
	svc.Namespace = ns
	svc.Name = name

	obj, exists, err := e.serviceStore.Get(svc)
	obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
	if err != nil {
		e.logger.Error("retrieving service failed", "err", err)
		return
@@ -482,7 +475,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
	if !exists {
		return
	}
	svc = obj.(*apiv1.Service)
	svc := obj.(*apiv1.Service)

	tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}
@@ -18,10 +18,12 @@ import (
	"testing"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/cache"

	"github.com/prometheus/prometheus/discovery/targetgroup"
)
@@ -95,6 +97,7 @@ func makeEndpoints() *v1.Endpoints {
}

func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -149,6 +152,7 @@ func TestEndpointsDiscoveryBeforeRun(t *testing.T) {
}

func TestEndpointsDiscoveryAdd(t *testing.T) {
	t.Parallel()
	obj := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpod",
@@ -274,6 +278,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
}

func TestEndpointsDiscoveryDelete(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())

	k8sDiscoveryTest{
@@ -292,6 +297,7 @@ func TestEndpointsDiscoveryDelete(t *testing.T) {
}

func TestEndpointsDiscoveryUpdate(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())

	k8sDiscoveryTest{
@@ -363,6 +369,7 @@ func TestEndpointsDiscoveryUpdate(t *testing.T) {
}

func TestEndpointsDiscoveryEmptySubsets(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())

	k8sDiscoveryTest{
@@ -391,6 +398,7 @@ func TestEndpointsDiscoveryEmptySubsets(t *testing.T) {
}

func TestEndpointsDiscoveryWithService(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())

	k8sDiscoveryTest{
@@ -456,6 +464,7 @@ func TestEndpointsDiscoveryWithService(t *testing.T) {
}

func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, makeEndpoints())

	k8sDiscoveryTest{
@@ -536,6 +545,7 @@ func TestEndpointsDiscoveryWithServiceUpdate(t *testing.T) {
}

func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
	t.Parallel()
	metadataConfig := AttachMetadataConfig{Node: true}
	nodeLabels1 := map[string]string{"az": "us-east1"}
	nodeLabels2 := map[string]string{"az": "us-west2"}
@@ -609,6 +619,7 @@ func TestEndpointsDiscoveryWithNodeMetadata(t *testing.T) {
}

func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
	t.Parallel()
	nodeLabels1 := map[string]string{"az": "us-east1"}
	nodeLabels2 := map[string]string{"az": "us-west2"}
	node1 := makeNode("foobar", "", "", nodeLabels1, nil)
@@ -686,6 +697,7 @@ func TestEndpointsDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
}

func TestEndpointsDiscoveryNamespaces(t *testing.T) {
	t.Parallel()
	epOne := makeEndpoints()
	epOne.Namespace = "ns1"
	objs := []runtime.Object{
@@ -837,6 +849,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
}

func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
	t.Parallel()
	epOne := makeEndpoints()
	epOne.Namespace = "own-ns"

@@ -931,6 +944,7 @@ func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
}

func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
	t.Parallel()
	ep := makeEndpoints()
	ep.Namespace = "ns"

@@ -976,6 +990,7 @@ func TestEndpointsDiscoveryEmptyPodStatus(t *testing.T) {
// TestEndpointsDiscoveryUpdatePod makes sure that Endpoints discovery detects underlying Pods changes.
// See https://github.com/prometheus/prometheus/issues/11305 for more details.
func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
	t.Parallel()
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpod",
@@ -1095,6 +1110,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
}

func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
	t.Parallel()
	objs := []runtime.Object{
		&v1.Endpoints{
			ObjectMeta: metav1.ObjectMeta{
@@ -1257,3 +1273,22 @@ func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
		},
	}.Run(t)
}

func BenchmarkResolvePodRef(b *testing.B) {
	indexer := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, nil)
	e := &Endpoints{
		podStore: indexer,
	}

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		p := e.resolvePodRef(&v1.ObjectReference{
			Kind:      "Pod",
			Name:      "testpod",
			Namespace: "foo",
		})
		require.Nil(b, p)
	}
}
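The new benchmark drives the `GetByKey`-based `resolvePodRef` in isolation against an empty indexer, so the measured allocations come from key construction rather than cache hits. With the standard toolchain it would be run with something like `go test -run '^$' -bench BenchmarkResolvePodRef -benchmem` inside the package directory (invocation illustrative).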
@@ -145,8 +145,11 @@ func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, n
			e.enqueueNode(node.Name)
		},
		DeleteFunc: func(o interface{}) {
			node := o.(*apiv1.Node)
			e.enqueueNode(node.Name)
			nodeName, err := nodeName(o)
			if err != nil {
				l.Error("Error getting Node name", "err", err)
			}
			e.enqueueNode(nodeName)
		},
	})
	if err != nil {
@@ -464,11 +467,8 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
	if ref == nil || ref.Kind != "Pod" {
		return nil
	}
	p := &apiv1.Pod{}
	p.Namespace = ref.Namespace
	p.Name = ref.Name

	obj, exists, err := e.podStore.Get(p)
	obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name))
	if err != nil {
		e.logger.Error("resolving pod ref failed", "err", err)
		return nil
@@ -481,19 +481,19 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {

func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) {
	var (
		svc   = &apiv1.Service{}
		found bool
		name  string
	)
	svc.Namespace = esa.namespace()
	ns := esa.namespace()

	// Every EndpointSlice object has the Service they belong to in the
	// kubernetes.io/service-name label.
	svc.Name, found = esa.labels()[esa.labelServiceName()]
	name, found = esa.labels()[esa.labelServiceName()]
	if !found {
		return
	}

	obj, exists, err := e.serviceStore.Get(svc)
	obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name))
	if err != nil {
		e.logger.Error("retrieving service failed", "err", err)
		return
@@ -501,7 +501,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
	if !exists {
		return
	}
	svc = obj.(*apiv1.Service)
	svc := obj.(*apiv1.Service)

	tg.Labels = tg.Labels.Merge(serviceLabels(svc))
}
@@ -21,6 +21,7 @@ import (
)

func Test_EndpointSliceAdaptor_v1(t *testing.T) {
	t.Parallel()
	endpointSlice := makeEndpointSliceV1()
	adaptor := newEndpointSliceAdaptorFromV1(endpointSlice)
@@ -114,6 +114,7 @@ func makeEndpointSliceV1() *v1.EndpointSlice {
}

func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}})

	k8sDiscoveryTest{
@@ -195,6 +196,7 @@ func TestEndpointSliceDiscoveryBeforeRun(t *testing.T) {
}

func TestEndpointSliceDiscoveryAdd(t *testing.T) {
	t.Parallel()
	obj := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpod",
@@ -322,6 +324,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
}

func TestEndpointSliceDiscoveryDelete(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
@@ -340,6 +343,7 @@ func TestEndpointSliceDiscoveryDelete(t *testing.T) {
}

func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
@@ -396,6 +400,7 @@ func TestEndpointSliceDiscoveryUpdate(t *testing.T) {
}

func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
@@ -424,6 +429,7 @@ func TestEndpointSliceDiscoveryEmptyEndpoints(t *testing.T) {
}

func TestEndpointSliceDiscoveryWithService(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
@@ -516,6 +522,7 @@ func TestEndpointSliceDiscoveryWithService(t *testing.T) {
}

func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{Names: []string{"default"}}, makeEndpointSliceV1())

	k8sDiscoveryTest{
@@ -623,6 +630,7 @@ func TestEndpointSliceDiscoveryWithServiceUpdate(t *testing.T) {
}

func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
	t.Parallel()
	metadataConfig := AttachMetadataConfig{Node: true}
	nodeLabels1 := map[string]string{"az": "us-east1"}
	nodeLabels2 := map[string]string{"az": "us-west2"}
@@ -722,6 +730,7 @@ func TestEndpointsSlicesDiscoveryWithNodeMetadata(t *testing.T) {
}

func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
	t.Parallel()
	metadataConfig := AttachMetadataConfig{Node: true}
	nodeLabels1 := map[string]string{"az": "us-east1"}
	nodeLabels2 := map[string]string{"az": "us-west2"}
@@ -827,6 +836,7 @@ func TestEndpointsSlicesDiscoveryWithUpdatedNodeMetadata(t *testing.T) {
}

func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
	t.Parallel()
	epOne := makeEndpointSliceV1()
	epOne.Namespace = "ns1"
	objs := []runtime.Object{
@@ -1003,6 +1013,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
}

func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
	t.Parallel()
	epOne := makeEndpointSliceV1()
	epOne.Namespace = "own-ns"

@@ -1123,6 +1134,7 @@ func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
}

func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
	t.Parallel()
	ep := makeEndpointSliceV1()
	ep.Namespace = "ns"

@@ -1169,6 +1181,7 @@ func TestEndpointSliceDiscoveryEmptyPodStatus(t *testing.T) {
// sets up indexing for the main Kube informer only when needed.
// See: https://github.com/prometheus/prometheus/pull/13554#discussion_r1490965817
func TestEndpointSliceInfIndexersCount(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name             string
		withNodeMetadata bool
@@ -1179,6 +1192,7 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) {

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			var (
				n                    *Discovery
				mainInfIndexersCount int
@@ -1204,6 +1218,7 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) {
}

func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) {
	t.Parallel()
	objs := []runtime.Object{
		&v1.EndpointSlice{
			ObjectMeta: metav1.ObjectMeta{
@@ -144,6 +144,7 @@ func expectedTargetGroups(ns string, tls TLSMode) map[string]*targetgroup.Group
}

func TestIngressDiscoveryAdd(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

	k8sDiscoveryTest{
@@ -158,6 +159,7 @@ func TestIngressDiscoveryAdd(t *testing.T) {
}

func TestIngressDiscoveryAddTLS(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

	k8sDiscoveryTest{
@@ -172,6 +174,7 @@ func TestIngressDiscoveryAddTLS(t *testing.T) {
}

func TestIngressDiscoveryAddMixed(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"default"}})

	k8sDiscoveryTest{
@@ -186,6 +189,7 @@ func TestIngressDiscoveryAddMixed(t *testing.T) {
}

func TestIngressDiscoveryNamespaces(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})

	expected := expectedTargetGroups("ns1", TLSNo)
@@ -207,6 +211,7 @@ func TestIngressDiscoveryNamespaces(t *testing.T) {
}

func TestIngressDiscoveryOwnNamespace(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})

	expected := expectedTargetGroups("own-ns", TLSNo)
@@ -59,14 +59,10 @@ const (
	presentValue = model.LabelValue("true")
)

var (
	// Http header.
	userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
	// DefaultSDConfig is the default Kubernetes SD configuration.
	DefaultSDConfig = SDConfig{
		HTTPClientConfig: config.DefaultHTTPClientConfig,
	}
)
// DefaultSDConfig is the default Kubernetes SD configuration.
var DefaultSDConfig = SDConfig{
	HTTPClientConfig: config.DefaultHTTPClientConfig,
}

func init() {
	discovery.RegisterConfig(&SDConfig{})
@@ -173,7 +169,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
		return err
	}
	if c.Role == "" {
		return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
		return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)")
	}
	err = c.HTTPClientConfig.Validate()
	if err != nil {
@@ -181,20 +177,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	}
	if c.APIServer.URL != nil && c.KubeConfig != "" {
		// Api-server and kubeconfig_file are mutually exclusive
		return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
		return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously")
	}
	if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
		// Kubeconfig_file and custom http config are mutually exclusive
		return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
		return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'")
	}
	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
		return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
		return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
	}
	if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
		return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
		return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
	}
	if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
		return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
		return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
	}

	foundSelectorRoles := make(map[Role]struct{})
@@ -288,7 +284,7 @@ func (d *Discovery) getNamespaces() []string {
func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
	m, ok := metrics.(*kubernetesMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	if l == nil {
@@ -336,7 +332,7 @@ func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*
		}
	}

	kcfg.UserAgent = userAgent
	kcfg.UserAgent = version.PrometheusUserAgent()
	kcfg.ContentType = "application/vnd.kubernetes.protobuf"

	c, err := kubernetes.NewForConfig(kcfg)
@@ -672,7 +668,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde
	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
		pod, ok := obj.(*apiv1.Pod)
		if !ok {
			return nil, fmt.Errorf("object is not a pod")
			return nil, errors.New("object is not a pod")
		}
		return []string{pod.Spec.NodeName}, nil
	}
@@ -686,7 +682,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
	indexers[podIndex] = func(obj interface{}) ([]string, error) {
		e, ok := obj.(*apiv1.Endpoints)
		if !ok {
			return nil, fmt.Errorf("object is not endpoints")
			return nil, errors.New("object is not endpoints")
		}
		var pods []string
		for _, target := range e.Subsets {
@@ -705,7 +701,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share
	indexers[nodeIndex] = func(obj interface{}) ([]string, error) {
		e, ok := obj.(*apiv1.Endpoints)
		if !ok {
			return nil, fmt.Errorf("object is not endpoints")
			return nil, errors.New("object is not endpoints")
		}
		var nodes []string
		for _, target := range e.Subsets {
@@ -751,7 +747,7 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object
			}
		}
	default:
		return nil, fmt.Errorf("object is not an endpointslice")
		return nil, errors.New("object is not an endpointslice")
	}

	return nodes, nil
@@ -804,3 +800,13 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta,
func namespacedName(namespace, name string) string {
	return namespace + "/" + name
}

// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone.
// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key.
func nodeName(o interface{}) (string, error) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o)
	if err != nil {
		return "", err
	}
	return key, nil
}
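The motivation for the new `nodeName` helper: when an informer misses a deletion (for example after a relisting), client-go delivers a `cache.DeletedFinalStateUnknown` tombstone to `DeleteFunc` instead of the object itself, so a bare `o.(*apiv1.Node)` assertion would panic or silently drop the event. A standalone sketch of the behavior the helper relies on:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// For a live object, DeletionHandlingMetaNamespaceKeyFunc falls back to
	// namespace/name; Nodes are cluster-scoped, so the key is just the name.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(node)
	fmt.Println(key) // node-1

	// For a tombstone, it returns the key stored in the tombstone itself.
	tombstone := cache.DeletedFinalStateUnknown{Key: "node-2", Obj: nil}
	key, _ = cache.DeletionHandlingMetaNamespaceKeyFunc(tombstone)
	fmt.Println(key) // node-2
}
```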
@@ -23,7 +23,9 @@ import (
	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"
	apiv1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/apimachinery/pkg/watch"
@@ -271,6 +273,7 @@ func (s *Service) hasSynced() bool {
}

func TestRetryOnError(t *testing.T) {
	t.Parallel()
	for _, successAt := range []int{1, 2, 3} {
		var called int
		f := func() error {
@@ -286,6 +289,7 @@ func TestRetryOnError(t *testing.T) {
}

func TestFailuresCountMetric(t *testing.T) {
	t.Parallel()
	tests := []struct {
		role             Role
		minFailedWatches int
@@ -320,3 +324,19 @@ func TestFailuresCountMetric(t *testing.T) {
		})
	}
}

func TestNodeName(t *testing.T) {
	t.Parallel()
	node := &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
	}
	name, err := nodeName(node)
	require.NoError(t, err)
	require.Equal(t, "foo", name)

	name, err = nodeName(cache.DeletedFinalStateUnknown{Key: "bar"})
	require.NoError(t, err)
	require.Equal(t, "bar", name)
}
@@ -82,7 +82,7 @@ func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.Co
}

func (n *Node) enqueue(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	key, err := nodeName(obj)
	if err != nil {
		return
	}
@@ -56,6 +56,7 @@ func makeEnumeratedNode(i int) *v1.Node {
}

func TestNodeDiscoveryBeforeStart(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleNode, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -95,6 +96,7 @@ func TestNodeDiscoveryBeforeStart(t *testing.T) {
}

func TestNodeDiscoveryAdd(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleNode, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -124,6 +126,7 @@ func TestNodeDiscoveryAdd(t *testing.T) {
}

func TestNodeDiscoveryDelete(t *testing.T) {
	t.Parallel()
	obj := makeEnumeratedNode(0)
	n, c := makeDiscovery(RoleNode, NamespaceDiscovery{}, obj)

@@ -142,6 +145,7 @@ func TestNodeDiscoveryDelete(t *testing.T) {
}

func TestNodeDiscoveryUpdate(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleNode, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -95,8 +95,11 @@ func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedIn
			p.enqueuePodsForNode(node.Name)
		},
		DeleteFunc: func(o interface{}) {
			node := o.(*apiv1.Node)
			p.enqueuePodsForNode(node.Name)
			nodeName, err := nodeName(o)
			if err != nil {
				l.Error("Error getting Node name", "err", err)
			}
			p.enqueuePodsForNode(nodeName)
		},
	})
	if err != nil {
@@ -239,6 +239,7 @@ func expectedPodTargetGroupsWithNodeMeta(ns, nodeName string, nodeLabels map[str
}

func TestPodDiscoveryBeforeRun(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -302,6 +303,7 @@ func TestPodDiscoveryBeforeRun(t *testing.T) {
}

func TestPodDiscoveryInitContainer(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{})

	ns := "default"
@@ -329,6 +331,7 @@ func TestPodDiscoveryInitContainer(t *testing.T) {
}

func TestPodDiscoveryAdd(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -343,6 +346,7 @@ func TestPodDiscoveryAdd(t *testing.T) {
}

func TestPodDiscoveryDelete(t *testing.T) {
	t.Parallel()
	obj := makePods()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{}, obj)

@@ -362,6 +366,7 @@ func TestPodDiscoveryDelete(t *testing.T) {
}

func TestPodDiscoveryUpdate(t *testing.T) {
	t.Parallel()
	obj := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testpod",
@@ -403,6 +408,7 @@ func TestPodDiscoveryUpdate(t *testing.T) {
}

func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{})
	initialPod := makePods()

@@ -427,6 +433,7 @@ func TestPodDiscoveryUpdateEmptyPodIP(t *testing.T) {
}

func TestPodDiscoveryNamespaces(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})

	expected := expectedPodTargetGroups("ns1")
@@ -448,6 +455,7 @@ func TestPodDiscoveryNamespaces(t *testing.T) {
}

func TestPodDiscoveryOwnNamespace(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RolePod, NamespaceDiscovery{IncludeOwnNamespace: true})

	expected := expectedPodTargetGroups("own-ns")
@@ -466,6 +474,7 @@ func TestPodDiscoveryOwnNamespace(t *testing.T) {
}

func TestPodDiscoveryWithNodeMetadata(t *testing.T) {
	t.Parallel()
	attachMetadata := AttachMetadataConfig{Node: true}
	n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata)
	nodeLbls := map[string]string{"l1": "v1"}
@@ -485,6 +494,7 @@ func TestPodDiscoveryWithNodeMetadata(t *testing.T) {
}

func TestPodDiscoveryWithNodeMetadataUpdateNode(t *testing.T) {
	t.Parallel()
	nodeLbls := map[string]string{"l2": "v2"}
	attachMetadata := AttachMetadataConfig{Node: true}
	n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata)
@@ -118,6 +118,7 @@ func makeLoadBalancerService() *v1.Service {
}

func TestServiceDiscoveryAdd(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleService, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -189,6 +190,7 @@ func TestServiceDiscoveryAdd(t *testing.T) {
}

func TestServiceDiscoveryDelete(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService())

	k8sDiscoveryTest{
@@ -207,6 +209,7 @@ func TestServiceDiscoveryDelete(t *testing.T) {
}

func TestServiceDiscoveryUpdate(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleService, NamespaceDiscovery{}, makeService())

	k8sDiscoveryTest{
@@ -251,6 +254,7 @@ func TestServiceDiscoveryUpdate(t *testing.T) {
}

func TestServiceDiscoveryNamespaces(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleService, NamespaceDiscovery{Names: []string{"ns1", "ns2"}})

	k8sDiscoveryTest{
@@ -303,6 +307,7 @@ func TestServiceDiscoveryNamespaces(t *testing.T) {
}

func TestServiceDiscoveryOwnNamespace(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleService, NamespaceDiscovery{IncludeOwnNamespace: true})

	k8sDiscoveryTest{
@@ -338,6 +343,7 @@ func TestServiceDiscoveryOwnNamespace(t *testing.T) {
}

func TestServiceDiscoveryAllNamespaces(t *testing.T) {
	t.Parallel()
	n, c := makeDiscovery(RoleService, NamespaceDiscovery{})

	k8sDiscoveryTest{
@@ -141,7 +141,7 @@ type Discovery struct {

func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*linodeMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	d := &Discovery{
@@ -165,7 +165,7 @@ func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.Discove
			Timeout: time.Duration(conf.RefreshInterval),
		},
	)
	client.SetUserAgent(fmt.Sprintf("Prometheus/%s", version.Version))
	client.SetUserAgent(version.PrometheusUserAgent())
	d.client = &client

	d.Discovery = refresh.NewDiscovery(
@@ -194,13 +194,12 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	events, err := d.client.ListEvents(ctx, &eventsOpts)
	if err != nil {
		var e *linodego.Error
		if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
			// If we get a 401, the token doesn't have `events:read_only` scope.
			// Disable event polling and fallback to doing a full refresh every interval.
			d.eventPollingEnabled = false
		} else {
		if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) {
			return nil, err
		}
		// If we get a 401, the token doesn't have `events:read_only` scope.
		// Disable event polling and fallback to doing a full refresh every interval.
		d.eventPollingEnabled = false
	} else {
		// Event polling tells us changes the Linode API is aware of. Actions issued outside of the Linode API,
		// such as issuing a `shutdown` at the VM's console instead of using the API to power off an instance,
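The Linode hunk above flips a nested if/else into a guard clause: return the error unless it is specifically a 401, then handle the 401 fall-back on the straight-line path. A self-contained sketch of the same control flow (the error type is a stand-in for `linodego.Error`, not the real one):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// apiError stands in for linodego.Error; only the Code field matters here.
type apiError struct{ Code int }

func (e *apiError) Error() string { return fmt.Sprintf("status %d", e.Code) }

// handle returns the error unless it is a 401, in which case the caller
// would fall back to full refreshes, mirroring the reworked flow above.
func handle(err error) error {
	var e *apiError
	if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) {
		return err
	}
	// 401: the token lacks the events scope; disable polling, keep going.
	return nil
}

func main() {
	fmt.Println(handle(&apiError{Code: http.StatusUnauthorized}))       // <nil>
	fmt.Println(handle(&apiError{Code: http.StatusInternalServerError})) // status 500
}
```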
@@ -101,12 +101,12 @@ func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.

	// Register the metrics.
	// We have to do this after setting all options, so that the name of the Manager is set.
	if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
		mgr.metrics = metrics
	} else {
	metrics, err := NewManagerMetrics(registerer, mgr.name)
	if err != nil {
		logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
		return nil
	}
	mgr.metrics = metrics

	return mgr
}
@@ -15,6 +15,7 @@ package discovery

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"strconv"
@@ -1209,9 +1210,9 @@ func TestGaugeFailedConfigs(t *testing.T) {

	c := map[string]Configs{
		"prometheus": {
			errorConfig{fmt.Errorf("tests error 0")},
			errorConfig{fmt.Errorf("tests error 1")},
			errorConfig{fmt.Errorf("tests error 2")},
			errorConfig{errors.New("tests error 0")},
			errorConfig{errors.New("tests error 1")},
			errorConfig{errors.New("tests error 2")},
		},
	}
	discoveryManager.ApplyConfig(c)
@@ -143,7 +143,7 @@ type Discovery struct {

func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*marathonMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "marathon_sd")
@@ -15,6 +15,7 @@ package moby

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net"
@@ -32,6 +33,7 @@ import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/config"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/version"

	"github.com/prometheus/prometheus/discovery"
	"github.com/prometheus/prometheus/discovery/refresh"
@@ -110,7 +112,7 @@ func (c *DockerSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error
		return err
	}
	if c.Host == "" {
		return fmt.Errorf("host missing")
		return errors.New("host missing")
	}
	if _, err = url.Parse(c.Host); err != nil {
		return err
@@ -131,7 +133,7 @@ type DockerDiscovery struct {

func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
	m, ok := metrics.(*dockerMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	d := &DockerDiscovery{
@@ -172,7 +174,7 @@ func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics disco
		}),
		client.WithScheme(hostURL.Scheme),
		client.WithHTTPHeaders(map[string]string{
			"User-Agent": userAgent,
			"User-Agent": version.PrometheusUserAgent(),
		}),
	)
}
@@ -235,17 +237,16 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
		if len(networks) == 0 {
			// Try to lookup shared networks
			for {
				if containerNetworkMode.IsContainer() {
					tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
					if !exists {
						break
					}
					networks = tmpContainer.NetworkSettings.Networks
					containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
					if len(networks) > 0 {
						break
					}
				} else {
				if !containerNetworkMode.IsContainer() {
					break
				}
				tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
				if !exists {
					break
				}
				networks = tmpContainer.NetworkSettings.Networks
				containerNetworkMode = container.NetworkMode(tmpContainer.HostConfig.NetworkMode)
				if len(networks) > 0 {
					break
				}
			}
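The loop rewrite above is the same guard-clause pattern: instead of wrapping the whole body in `if containerNetworkMode.IsContainer() { ... } else { break }`, the negated check breaks out at the top. The logic being preserved is the walk along `container:<id>` network-mode chains until concrete networks turn up. A toy, self-contained version of that walk (data and names invented for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// Containers in "container:<id>" network mode inherit networks from another
// container, so the lookup walks the chain until it finds real networks.
func main() {
	networkMode := map[string]string{
		"a": "container:b",
		"b": "container:c",
		"c": "bridge",
	}
	networks := map[string][]string{
		"c": {"bridge"},
	}

	mode := networkMode["a"]
	var found []string
	for {
		if !strings.HasPrefix(mode, "container:") { // guard clause, as in the refactor
			break
		}
		id := strings.TrimPrefix(mode, "container:")
		if ns, ok := networks[id]; ok && len(ns) > 0 {
			found = ns
			break
		}
		mode = networkMode[id]
	}
	fmt.Println(found) // [bridge]
}
```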
@@ -15,6 +15,7 @@ package moby

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net/http"
@@ -37,8 +38,6 @@ const (
	swarmLabel = model.MetaLabelPrefix + "dockerswarm_"
)

var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)

// DefaultDockerSwarmSDConfig is the default Docker Swarm SD configuration.
var DefaultDockerSwarmSDConfig = DockerSwarmSDConfig{
	RefreshInterval: model.Duration(60 * time.Second),
@@ -99,7 +98,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
		return err
	}
	if c.Host == "" {
		return fmt.Errorf("host missing")
		return errors.New("host missing")
	}
	if _, err = url.Parse(c.Host); err != nil {
		return err
@@ -107,7 +106,7 @@ func (c *DockerSwarmSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) e
	switch c.Role {
	case "services", "nodes", "tasks":
	case "":
		return fmt.Errorf("role missing (one of: tasks, services, nodes)")
		return errors.New("role missing (one of: tasks, services, nodes)")
	default:
		return fmt.Errorf("invalid role %s, expected tasks, services, or nodes", c.Role)
	}
@@ -128,7 +127,7 @@ type Discovery struct {

func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*dockerswarmMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	d := &Discovery{
@@ -168,7 +167,7 @@ func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discov
		}),
		client.WithScheme(hostURL.Scheme),
		client.WithHTTPHeaders(map[string]string{
			"User-Agent": userAgent,
			"User-Agent": version.PrometheusUserAgent(),
		}),
	)
}
@@ -124,7 +124,7 @@ type Discovery struct {

func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
	m, ok := metrics.(*nomadMetrics)
	if !ok {
		return nil, fmt.Errorf("invalid discovery metrics type")
		return nil, errors.New("invalid discovery metrics type")
	}

	d := &Discovery{
@@ -127,19 +127,37 @@ func (m *SDMock) HandleServiceHashiCupsGet() {
}

func TestConfiguredService(t *testing.T) {
	conf := &SDConfig{
		Server: "http://localhost:4646",
	testCases := []struct {
		name        string
		server      string
		acceptedURL bool
	}{
		{"invalid hostname URL", "http://foo.bar:4646", true},
		{"invalid even though accepted by parsing", "foo.bar:4646", true},
		{"valid address URL", "http://172.30.29.23:4646", true},
		{"invalid URL", "172.30.29.23:4646", false},
	}

	reg := prometheus.NewRegistry()
	refreshMetrics := discovery.NewRefreshMetrics(reg)
	metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
	require.NoError(t, metrics.Register())
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			conf := &SDConfig{
				Server: tc.server,
			}

			_, err := NewDiscovery(conf, nil, metrics)
			require.NoError(t, err)
			reg := prometheus.NewRegistry()
			refreshMetrics := discovery.NewRefreshMetrics(reg)
			metrics := conf.NewDiscovererMetrics(reg, refreshMetrics)
			require.NoError(t, metrics.Register())
			defer metrics.Unregister()

			metrics.Unregister()
			_, err := NewDiscovery(conf, nil, metrics)
			if tc.acceptedURL {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
			}
		})
	}
}

func TestNomadSDRefresh(t *testing.T) {
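The rewritten test follows Go's standard table-driven pattern: named cases in a slice, one `t.Run` subtest per case, and per-case setup so metric registries don't collide across cases. A compact, self-contained skeleton of the same shape (names and the URL check are illustrative, not from the diff):

```go
package main

import (
	"net/url"
	"testing"
)

func TestServerURLTable(t *testing.T) {
	testCases := []struct {
		name   string
		server string
		ok     bool
	}{
		{"valid URL", "http://172.30.29.23:4646", true},
		{"no scheme", "172.30.29.23:4646", false},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Per-case setup goes here, as the Nomad test does with its
			// registry and discoverer metrics.
			_, err := url.ParseRequestURI(tc.server)
			if (err == nil) != tc.ok {
				t.Fatalf("ParseRequestURI(%q) err=%v, want ok=%v", tc.server, err, tc.ok)
			}
		})
	}
}
```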
@@ -20,10 +20,10 @@ import (
	"net"
	"strconv"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
	"github.com/gophercloud/gophercloud/pagination"
	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors"
	"github.com/gophercloud/gophercloud/v2/pagination"
	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -59,8 +59,7 @@ func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercl
}

func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	h.provider.Context = ctx
	err := openstack.Authenticate(h.provider, *h.authOpts)
	err := openstack.Authenticate(ctx, h.provider, *h.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}
@@ -78,7 +77,7 @@ func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group
	// OpenStack API reference
	// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
	pagerHypervisors := hypervisors.List(client, nil)
	err = pagerHypervisors.EachPage(func(page pagination.Page) (bool, error) {
	err = pagerHypervisors.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
		hypervisorList, err := hypervisors.ExtractHypervisors(page)
		if err != nil {
			return false, fmt.Errorf("could not extract hypervisors: %w", err)
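The OpenStack files migrate from gophercloud v1 to v2, whose main API change is visible in these hunks: the context is passed explicitly to `Authenticate` and `EachPage` instead of being stashed on `provider.Context`. A compilable sketch of the v2 calling convention, using only the calls that appear in this diff (credentials and region are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/hypervisors"
	"github.com/gophercloud/gophercloud/v2/pagination"
)

// listHypervisors threads ctx through auth and pagination, v2 style.
func listHypervisors(ctx context.Context, provider *gophercloud.ProviderClient, authOpts gophercloud.AuthOptions, region string) error {
	if err := openstack.Authenticate(ctx, provider, authOpts); err != nil {
		return fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}
	client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{Region: region})
	if err != nil {
		return err
	}
	return hypervisors.List(client, nil).EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
		hs, err := hypervisors.ExtractHypervisors(page)
		if err != nil {
			return false, err
		}
		fmt.Println(len(hs), "hypervisors on this page")
		return true, nil // keep paging
	})
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_ = listHypervisors(ctx, nil, gophercloud.AuthOptions{}, "RegionOne")
}
```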
@@ -20,11 +20,12 @@ import (
	"net"
	"strconv"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
	"github.com/gophercloud/gophercloud/pagination"
	"github.com/gophercloud/gophercloud/v2"
	"github.com/gophercloud/gophercloud/v2/openstack"
	"github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers"
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips"
	"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/ports"
	"github.com/gophercloud/gophercloud/v2/pagination"
	"github.com/prometheus/common/model"
	"github.com/prometheus/common/promslog"
@@ -72,13 +73,12 @@ func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gopherclou
}

type floatingIPKey struct {
	id    string
	fixed string
	deviceID string
	fixed    string
}

func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	i.provider.Context = ctx
	err := openstack.Authenticate(i.provider, *i.authOpts)
	err := openstack.Authenticate(ctx, i.provider, *i.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}
@@ -90,23 +90,60 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
		return nil, fmt.Errorf("could not create OpenStack compute session: %w", err)
	}

	networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack network session: %w", err)
	}

	// OpenStack API reference
	// https://developer.openstack.org/api-ref/compute/#list-floating-ips
	pagerFIP := floatingips.List(client)
	// https://docs.openstack.org/api-ref/network/v2/index.html#list-ports
	portPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list all ports: %w", err)
	}

	allPorts, err := ports.ExtractPorts(portPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract Ports: %w", err)
	}

	portList := make(map[string]string)
	for _, port := range allPorts {
		portList[port.ID] = port.DeviceID
	}

	// OpenStack API reference
	// https://docs.openstack.org/api-ref/network/v2/index.html#list-floating-ips
	pagerFIP := floatingips.List(networkClient, floatingips.ListOpts{})
	floatingIPList := make(map[floatingIPKey]string)
	floatingIPPresent := make(map[string]struct{})
	err = pagerFIP.EachPage(func(page pagination.Page) (bool, error) {
	err = pagerFIP.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
		result, err := floatingips.ExtractFloatingIPs(page)
		if err != nil {
			return false, fmt.Errorf("could not extract floatingips: %w", err)
		}
		for _, ip := range result {
			// Skip not associated ips
			if ip.InstanceID == "" || ip.FixedIP == "" {
			if ip.PortID == "" || ip.FixedIP == "" {
				continue
			}
			floatingIPList[floatingIPKey{id: ip.InstanceID, fixed: ip.FixedIP}] = ip.IP
			floatingIPPresent[ip.IP] = struct{}{}

			// Fetch deviceID from portList
			deviceID, ok := portList[ip.PortID]
			if !ok {
				i.logger.Warn("Floating IP PortID not found in portList", "PortID", ip.PortID)
				continue
			}

			key := floatingIPKey{
				deviceID: deviceID,
				fixed:    ip.FixedIP,
			}

			floatingIPList[key] = ip.FloatingIP
			floatingIPPresent[ip.FloatingIP] = struct{}{}
		}
		return true, nil
	})
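The gophercloud v2 networking API associates floating IPs with ports rather than directly with instances, so the discovery now lists all ports once, builds a PortID to DeviceID map (the device being the owning server), and keys floating IPs by (deviceID, fixedIP). A toy, self-contained version of that join (types and data invented for illustration):

```go
package main

import "fmt"

type floatingIP struct{ PortID, FixedIP, FloatingIP string }
type key struct{ deviceID, fixed string }

func main() {
	// PortID -> DeviceID, as built from the one-shot ports.List call.
	portList := map[string]string{"port-1": "server-a"}

	fips := []floatingIP{{PortID: "port-1", FixedIP: "10.0.0.5", FloatingIP: "203.0.113.7"}}

	floatingIPList := make(map[key]string)
	for _, ip := range fips {
		if dev, ok := portList[ip.PortID]; ok {
			floatingIPList[key{dev, ip.FixedIP}] = ip.FloatingIP
		}
	}

	// Later, per server address: look up by (server ID, fixed IP).
	fmt.Println(floatingIPList[key{"server-a", "10.0.0.5"}]) // 203.0.113.7
}
```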
@@ -123,7 +160,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
	tg := &targetgroup.Group{
		Source: "OS_" + i.region,
	}
	err = pager.EachPage(func(page pagination.Page) (bool, error) {
	err = pager.EachPage(ctx, func(ctx context.Context, page pagination.Page) (bool, error) {
		if ctx.Err() != nil {
			return false, fmt.Errorf("could not extract instances: %w", ctx.Err())
		}
@@ -198,7 +235,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
			}
			lbls[openstackLabelAddressPool] = model.LabelValue(pool)
			lbls[openstackLabelPrivateIP] = model.LabelValue(addr)
			if val, ok := floatingIPList[floatingIPKey{id: s.ID, fixed: addr}]; ok {
			if val, ok := floatingIPList[floatingIPKey{deviceID: s.ID, fixed: addr}]; ok {
				lbls[openstackLabelPublicIP] = model.LabelValue(val)
			}
			addr = net.JoinHostPort(addr, strconv.Itoa(i.port))
@@ -32,6 +32,7 @@ func (s *OpenstackSDInstanceTestSuite) SetupTest(t *testing.T) {

	s.Mock.HandleServerListSuccessfully()
	s.Mock.HandleFloatingIPListSuccessfully()
	s.Mock.HandlePortsListSuccessfully()

	s.Mock.HandleVersionsSuccessfully()
	s.Mock.HandleAuthSuccessfully()
@@ -66,7 +67,7 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
	tg := tgs[0]
	require.NotNil(t, tg)
	require.NotNil(t, tg.Targets)
	require.Len(t, tg.Targets, 4)
	require.Len(t, tg.Targets, 6)

	for i, lbls := range []model.LabelSet{
		{
@@ -119,6 +120,31 @@ func TestOpenstackSDInstanceRefresh(t *testing.T) {
			"__meta_openstack_project_id":       model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"),
			"__meta_openstack_user_id":          model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"),
		},
		{
			"__address__":                       model.LabelValue("10.0.0.33:0"),
			"__meta_openstack_instance_flavor":  model.LabelValue("m1.small"),
			"__meta_openstack_instance_id":      model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"),
			"__meta_openstack_instance_status":  model.LabelValue("ACTIVE"),
			"__meta_openstack_instance_name":    model.LabelValue("merp-project2"),
			"__meta_openstack_private_ip":       model.LabelValue("10.0.0.33"),
			"__meta_openstack_address_pool":     model.LabelValue("private"),
			"__meta_openstack_tag_env":          model.LabelValue("prod"),
			"__meta_openstack_project_id":       model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"),
			"__meta_openstack_user_id":          model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"),
		},
		{
			"__address__":                       model.LabelValue("10.0.0.34:0"),
			"__meta_openstack_instance_flavor":  model.LabelValue("m1.small"),
			"__meta_openstack_instance_id":      model.LabelValue("87caf8ed-d92a-41f6-9dcd-d1399e39899f"),
			"__meta_openstack_instance_status":  model.LabelValue("ACTIVE"),
			"__meta_openstack_instance_name":    model.LabelValue("merp-project2"),
			"__meta_openstack_private_ip":       model.LabelValue("10.0.0.34"),
			"__meta_openstack_address_pool":     model.LabelValue("private"),
			"__meta_openstack_tag_env":          model.LabelValue("prod"),
			"__meta_openstack_public_ip":        model.LabelValue("10.10.10.24"),
			"__meta_openstack_project_id":       model.LabelValue("b78fef2305934dbbbeb9a10b4c326f7a"),
			"__meta_openstack_user_id":          model.LabelValue("9349aff8be7545ac9d2f1d00999a23cd"),
		},
	} {
		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
			require.Equal(t, lbls, tg.Targets[i])
200 discovery/openstack/loadbalancer.go Normal file
@ -0,0 +1,200 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package openstack
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gophercloud/gophercloud/v2"
|
||||
"github.com/gophercloud/gophercloud/v2/openstack"
|
||||
"github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/listeners"
|
||||
"github.com/gophercloud/gophercloud/v2/openstack/loadbalancer/v2/loadbalancers"
|
||||
"github.com/gophercloud/gophercloud/v2/openstack/networking/v2/extensions/layer3/floatingips"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
openstackLabelLoadBalancerID = openstackLabelPrefix + "loadbalancer_id"
|
||||
openstackLabelLoadBalancerName = openstackLabelPrefix + "loadbalancer_name"
|
||||
openstackLabelLoadBalancerOperatingStatus = openstackLabelPrefix + "loadbalancer_operating_status"
|
||||
openstackLabelLoadBalancerProvisioningStatus = openstackLabelPrefix + "loadbalancer_provisioning_status"
|
||||
openstackLabelLoadBalancerAvailabilityZone = openstackLabelPrefix + "loadbalancer_availability_zone"
|
||||
openstackLabelLoadBalancerFloatingIP = openstackLabelPrefix + "loadbalancer_floating_ip"
|
||||
openstackLabelLoadBalancerVIP = openstackLabelPrefix + "loadbalancer_vip"
|
||||
openstackLabelLoadBalancerProvider = openstackLabelPrefix + "loadbalancer_provider"
|
||||
openstackLabelLoadBalancerTags = openstackLabelPrefix + "loadbalancer_tags"
|
||||
)

// LoadBalancerDiscovery discovers OpenStack load balancers.
type LoadBalancerDiscovery struct {
	provider     *gophercloud.ProviderClient
	authOpts     *gophercloud.AuthOptions
	region       string
	logger       *slog.Logger
	availability gophercloud.Availability
}

// newLoadBalancerDiscovery returns a new loadbalancer discovery.
func newLoadBalancerDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
	region string, availability gophercloud.Availability, l *slog.Logger,
) *LoadBalancerDiscovery {
	if l == nil {
		l = promslog.NewNopLogger()
	}
	return &LoadBalancerDiscovery{
		provider: provider, authOpts: opts,
		region: region, availability: availability, logger: l,
	}
}

func (i *LoadBalancerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
	err := openstack.Authenticate(ctx, i.provider, *i.authOpts)
	if err != nil {
		return nil, fmt.Errorf("could not authenticate to OpenStack: %w", err)
	}

	client, err := openstack.NewLoadBalancerV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack load balancer session: %w", err)
	}

	networkClient, err := openstack.NewNetworkV2(i.provider, gophercloud.EndpointOpts{
		Region: i.region, Availability: i.availability,
	})
	if err != nil {
		return nil, fmt.Errorf("could not create OpenStack network session: %w", err)
	}

	allPages, err := loadbalancers.List(client, loadbalancers.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list load balancers: %w", err)
	}

	allLBs, err := loadbalancers.ExtractLoadBalancers(allPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract load balancers: %w", err)
	}

	// Fetch all listeners in one API call.
	listenerPages, err := listeners.List(client, listeners.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list all listeners: %w", err)
	}

	allListeners, err := listeners.ExtractListeners(listenerPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract all listeners: %w", err)
	}

	// Group listeners by load balancer ID.
	listenerMap := make(map[string][]listeners.Listener)
	for _, listener := range allListeners {
		// A listener can be attached to several load balancers.
		for _, lb := range listener.Loadbalancers {
			listenerMap[lb.ID] = append(listenerMap[lb.ID], listener)
		}
	}

	// Fetch all floating IPs in one API call.
	fipPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to list floating IPs: %w", err)
	}

	allFIPs, err := floatingips.ExtractFloatingIPs(fipPages)
	if err != nil {
		return nil, fmt.Errorf("failed to extract floating IPs: %w", err)
	}

	// Index floating IPs by the port they are attached to.
	fipMap := make(map[string]string) // Key: PortID, Value: Floating IP.
	for _, fip := range allFIPs {
		if fip.PortID != "" {
			fipMap[fip.PortID] = fip.FloatingIP
		}
	}

	tg := &targetgroup.Group{
		Source: "OS_" + i.region,
	}

	for _, lb := range allLBs {
		// Retrieve the listeners for this load balancer from the map.
		lbListeners, exists := listenerMap[lb.ID]
		if !exists || len(lbListeners) == 0 {
			i.logger.Debug("Got no listener", "loadbalancer", lb.ID)
			continue
		}

		// Port of the first PROMETHEUS listener, if any.
		var listenerPort int
		hasPrometheusListener := false
		for _, listener := range lbListeners {
			if listener.Protocol == "PROMETHEUS" {
				hasPrometheusListener = true
				listenerPort = listener.ProtocolPort
				break
			}
		}

		// Skip load balancers without a PROMETHEUS listener.
		if !hasPrometheusListener {
			i.logger.Debug("Got no PROMETHEUS listener", "loadbalancer", lb.ID)
			continue
		}

		labels := model.LabelSet{}
		addr := net.JoinHostPort(lb.VipAddress, strconv.Itoa(listenerPort))
		labels[model.AddressLabel] = model.LabelValue(addr)
		labels[openstackLabelLoadBalancerID] = model.LabelValue(lb.ID)
		labels[openstackLabelLoadBalancerName] = model.LabelValue(lb.Name)
		labels[openstackLabelLoadBalancerOperatingStatus] = model.LabelValue(lb.OperatingStatus)
		labels[openstackLabelLoadBalancerProvisioningStatus] = model.LabelValue(lb.ProvisioningStatus)
		labels[openstackLabelLoadBalancerAvailabilityZone] = model.LabelValue(lb.AvailabilityZone)
		labels[openstackLabelLoadBalancerVIP] = model.LabelValue(lb.VipAddress)
		labels[openstackLabelLoadBalancerProvider] = model.LabelValue(lb.Provider)
		labels[openstackLabelProjectID] = model.LabelValue(lb.ProjectID)

		if len(lb.Tags) > 0 {
			labels[openstackLabelLoadBalancerTags] = model.LabelValue(strings.Join(lb.Tags, ","))
		}

		if floatingIP, exists := fipMap[lb.VipPortID]; exists {
			labels[openstackLabelLoadBalancerFloatingIP] = model.LabelValue(floatingIP)
		}

		tg.Targets = append(tg.Targets, labels)
	}

	return []*targetgroup.Group{tg}, nil
}
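
Worth noting about the shape of refresh above: it lists listeners and floating IPs once each and joins them to load balancers in memory, rather than issuing one API call per load balancer. A minimal, self-contained sketch of that join pattern, using simplified stand-in types rather than the real gophercloud structs:

package main

import "fmt"

// Simplified stand-ins for the gophercloud structs used by refresh;
// only the fields the join needs.
type listener struct {
	ID    string
	LBIDs []string // IDs of the load balancers this listener is attached to.
}

type floatingIP struct {
	PortID     string
	FloatingIP string
}

func main() {
	allListeners := []listener{
		{ID: "l1", LBIDs: []string{"lb-a"}},
		{ID: "l2", LBIDs: []string{"lb-a", "lb-b"}},
	}
	allFIPs := []floatingIP{
		{PortID: "port-1", FloatingIP: "192.0.2.10"},
		{PortID: "", FloatingIP: "192.0.2.11"}, // Unattached; skipped below.
	}

	// Group listeners by load balancer ID: one pass over the listing
	// instead of one API call per load balancer.
	listenerMap := make(map[string][]listener)
	for _, l := range allListeners {
		for _, id := range l.LBIDs {
			listenerMap[id] = append(listenerMap[id], l)
		}
	}

	// Index floating IPs by the port they are bound to; a load balancer's
	// VIP port ID is then a direct key into this map.
	fipMap := make(map[string]string)
	for _, fip := range allFIPs {
		if fip.PortID != "" {
			fipMap[fip.PortID] = fip.FloatingIP
		}
	}

	fmt.Println(len(listenerMap["lb-a"]), fipMap["port-1"]) // 2 192.0.2.10
}

Building the two maps costs one pass over each listing, after which the per-load-balancer lookups in the main loop are constant time.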
137
discovery/openstack/loadbalancer_test.go
Normal file
@@ -0,0 +1,137 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package openstack

import (
	"context"
	"fmt"
	"testing"

	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/require"
)

type OpenstackSDLoadBalancerTestSuite struct {
	Mock *SDMock
}

func (s *OpenstackSDLoadBalancerTestSuite) SetupTest(t *testing.T) {
	s.Mock = NewSDMock(t)
	s.Mock.Setup()

	s.Mock.HandleLoadBalancerListSuccessfully()
	s.Mock.HandleListenersListSuccessfully()
	s.Mock.HandleFloatingIPListSuccessfully()

	s.Mock.HandleVersionsSuccessfully()
	s.Mock.HandleAuthSuccessfully()
}

func (s *OpenstackSDLoadBalancerTestSuite) openstackAuthSuccess() (refresher, error) {
	conf := SDConfig{
		IdentityEndpoint: s.Mock.Endpoint(),
		Password:         "test",
		Username:         "test",
		DomainName:       "12345",
		Region:           "RegionOne",
		Role:             "loadbalancer",
	}
	return newRefresher(&conf, nil)
}

func TestOpenstackSDLoadBalancerRefresh(t *testing.T) {
	mock := &OpenstackSDLoadBalancerTestSuite{}
	mock.SetupTest(t)

	instance, err := mock.openstackAuthSuccess()
	require.NoError(t, err)

	ctx := context.Background()
	tgs, err := instance.refresh(ctx)

	require.NoError(t, err)
	require.Len(t, tgs, 1)

	tg := tgs[0]
	require.NotNil(t, tg)
	require.NotNil(t, tg.Targets)
	require.Len(t, tg.Targets, 4)

	for i, lbls := range []model.LabelSet{
		{
			"__address__": model.LabelValue("10.0.0.32:9273"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("ef079b0c-e610-4dfb-b1aa-b49f07ac48e5"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb1"),
			"__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"),
			"__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"),
			"__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.1.2"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.0.32"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_loadbalancer_tags": model.LabelValue("tag1,tag2"),
			"__meta_openstack_project_id": model.LabelValue("fcad67a6189847c4aecfa3c81a05783b"),
		},
		{
			"__address__": model.LabelValue("10.0.2.78:8080"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("d92c471e-8d3e-4b9f-b2b5-9c72a9e3ef54"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb3"),
			"__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"),
			"__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az3"),
			"__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.3.4"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.2.78"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_loadbalancer_tags": model.LabelValue("tag5,tag6"),
			"__meta_openstack_project_id": model.LabelValue("ac57f03dba1a4fdebff3e67201bc7a85"),
		},
		{
			"__address__": model.LabelValue("10.0.3.99:9090"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("f5c7e918-df38-4a5a-a7d4-d9c27ab2cf67"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb4"),
			"__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"),
			"__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az1"),
			"__meta_openstack_loadbalancer_floating_ip": model.LabelValue("192.168.4.5"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.3.99"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_project_id": model.LabelValue("fa8c372dfe4d4c92b0c4e3a2d9b3c9fa"),
		},
		{
			"__address__": model.LabelValue("10.0.4.88:9876"),
			"__meta_openstack_loadbalancer_id": model.LabelValue("e83a6d92-7a3e-4567-94b3-20c83b32a75e"),
			"__meta_openstack_loadbalancer_name": model.LabelValue("lb5"),
			"__meta_openstack_loadbalancer_operating_status": model.LabelValue("ONLINE"),
			"__meta_openstack_loadbalancer_provisioning_status": model.LabelValue("ACTIVE"),
			"__meta_openstack_loadbalancer_availability_zone": model.LabelValue("az4"),
			"__meta_openstack_loadbalancer_vip": model.LabelValue("10.0.4.88"),
			"__meta_openstack_loadbalancer_provider": model.LabelValue("amphora"),
			"__meta_openstack_project_id": model.LabelValue("a5d3b2e1e6f34cd9a5f7c2f01a6b8e29"),
		},
	} {
		t.Run(fmt.Sprintf("item %d", i), func(t *testing.T) {
			require.Equal(t, lbls, tg.Targets[i])
		})
	}
}

func TestOpenstackSDLoadBalancerRefreshWithDoneContext(t *testing.T) {
	mock := &OpenstackSDLoadBalancerTestSuite{}
	mock.SetupTest(t)

	loadbalancer, _ := mock.openstackAuthSuccess()
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	_, err := loadbalancer.refresh(ctx)
	require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
}
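
The done-context test relies on a context canceled before refresh is called causing the underlying HTTP requests to fail with context.Canceled. A standalone sketch of that behavior against a throwaway test server (the handler below is illustrative, not the SDMock used above):

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Throwaway server standing in for the mocked OpenStack API.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ok")
	}))
	defer srv.Close()

	// Cancel before issuing any request, as the test above cancels
	// before calling refresh.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	req, _ := http.NewRequestWithContext(ctx, http.MethodGet, srv.URL, nil)
	_, err := http.DefaultClient.Do(req)
	fmt.Println(errors.Is(err, context.Canceled)) // true
}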
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff