Merge branch 'main' into dimitar/ruler/unsupported-logger
Signed-off-by: Dimitar Dimitrov <dimitar.dimitrov@grafana.com>
Commit 5b2cfc5e45

.github/ISSUE_TEMPLATE/config.yml (vendored, 2 changes)
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,4 +1,4 @@
-blank_issues_enabled: false
+blank_issues_enabled: true
 contact_links:
   - name: Prometheus Community Support
     url: https://prometheus.io/community/
.github/workflows/buf-lint.yml (vendored, 4 changes)
--- a/.github/workflows/buf-lint.yml
+++ b/.github/workflows/buf-lint.yml
@@ -12,8 +12,8 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
.github/workflows/buf.yml (vendored, 4 changes)
--- a/.github/workflows/buf.yml
+++ b/.github/workflows/buf.yml
@@ -12,8 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
    steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
-      - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+      - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
       - uses: bufbuild/buf-lint-action@06f9dd823d873146471cfaaf108a993fe00e5325 # v1.1.1
|
.github/workflows/ci.yml (vendored, 28 changes)
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -13,7 +13,7 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -29,7 +29,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...
@@ -48,7 +48,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""
@@ -62,7 +62,7 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.23-base
 
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:
@@ -79,7 +79,7 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           go-version: 1.23.x
@@ -96,7 +96,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
@@ -121,7 +121,7 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
@@ -146,7 +146,7 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:
@@ -169,7 +169,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
@@ -182,7 +182,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
@@ -208,7 +208,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_main
         with:
@@ -225,7 +225,7 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_release
         with:
@@ -240,10 +240,10 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - name: Install nodejs
-        uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3
+        uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4
         with:
           node-version-file: "web/ui/.nvmrc"
           registry-url: "https://registry.npmjs.org"
.github/workflows/codeql-analysis.yml (vendored, 8 changes)
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -24,15 +24,15 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
 
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
         with:
           languages: ${{ matrix.language }}
 
       - name: Autobuild
-        uses: github/codeql-action/autobuild@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/autobuild@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
 
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@4dd16135b69a43b6c8efb853346f8437d92d3c93 # v3.26.6
+        uses: github/codeql-action/analyze@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10
.github/workflows/container_description.yml (vendored, 4 changes)
--- a/.github/workflows/container_description.yml
+++ b/.github/workflows/container_description.yml
@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub
@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name
.github/workflows/repo_sync.yml (vendored, 2 changes)
--- a/.github/workflows/repo_sync.yml
+++ b/.github/workflows/repo_sync.yml
@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}
.github/workflows/scorecards.yml (vendored, 4 changes)
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -21,7 +21,7 @@ jobs:
 
     steps:
       - name: "Checkout code"
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
         with:
           persist-credentials: false
 
@@ -45,6 +45,6 @@ jobs:
 
       # Upload the results to GitHub's code scanning dashboard.
       - name: "Upload to code-scanning"
-        uses: github/codeql-action/upload-sarif@4dd16135b69a43b6c8efb853346f8437d92d3c93 # tag=v3.26.6
+        uses: github/codeql-action/upload-sarif@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # tag=v3.26.10
         with:
           sarif_file: results.sarif
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -23,6 +23,7 @@ linters:
     - usestdlibvars
     - whitespace
     - loggercheck
+    - sloglint
 
 issues:
   max-issues-per-linter: 0
@@ -100,8 +101,6 @@ linters-settings:
       - (net/http.ResponseWriter).Write
       # No need to check for errors on server's shutdown.
       - (*net/http.Server).Shutdown
-      # Never check for logger errors.
-      - (github.com/go-kit/log.Logger).Log
       # Never check for rollback errors as Rollback() is called when a previous error was detected.
       - (github.com/prometheus/prometheus/storage.Appender).Rollback
   goimports:
@@ -153,14 +152,4 @@ linters-settings:
     disable:
       - float-compare
       - go-require
-    enable:
-      - bool-compare
-      - compares
-      - empty
-      - error-is-as
-      - error-nil
-      - expected-actual
-      - len
-      - require-error
-      - suite-dont-use-pkg
-      - suite-extra-assert-call
+    enable-all: true
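The linter swap tracks the slog migration in `cmd/prometheus/main.go` below: the go-kit-oriented errcheck exclusion goes away and `sloglint` comes in to police `log/slog` usage. As a minimal illustration (not taken from this diff), sloglint's default rule flags calls that mix plain key-value arguments with `slog.Attr` values:

```go
package main

import "log/slog"

func main() {
	// sloglint's default "no mixed arguments" check flags calls that mix
	// plain key-value pairs with slog.Attr values in a single call.
	slog.Info("msg", "key1", 1, slog.Int("key2", 2)) // would be flagged

	// Either consistent form passes: all key-value pairs...
	slog.Info("msg", "key1", 1, "key2", 2)
	// ...or all attributes.
	slog.Info("msg", slog.Int("key1", 1), slog.Int("key2", 2))
}
```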
CHANGELOG.md (64 changes)
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
 # Changelog
 
+## unreleased
+
+* [CHANGE] `holt_winters` is now called `double_exponential_smoothing` and moves behind the [experimental-promql-functions feature flag](https://prometheus.io/docs/prometheus/latest/feature_flags/#experimental-promql-functions). #14930
+* [CHANGE] API: The OTLP receiver endpoint can now be enabled using `--web.enable-otlp-receiver` instead of `--enable-feature=otlp-write-receiver`. #14894
+* [BUGFIX] PromQL: Only return "possible non-counter" annotation when `rate` returns points. #14910
+
 ## 3.0.0-beta.0 / 2024-09-05
 
 Release 3.0.0-beta.0 includes new features such as a brand new UI and UTF-8 support enabled by default. As a new major version, several breaking changes are introduced. The breaking changes are mainly around the removal of deprecated feature flags and CLI arguments, and the full list can be found below. Most users should be able to try this release out of the box without any configuration changes.
@@ -16,32 +22,40 @@ As is traditional with a beta release, we do **not** recommend users install 3.0
 * [CHANGE] Agent mode has been promoted to stable. The feature flag `agent` has been removed. To run Prometheus in Agent mode, use the new `--agent` cmdline arg instead. #14747
 * [CHANGE] Remove deprecated `remote-write-receiver`,`promql-at-modifier`, and `promql-negative-offset` feature flags. #13456, #14526
 * [CHANGE] Remove deprecated `storage.tsdb.allow-overlapping-blocks`, `alertmanager.timeout`, and `storage.tsdb.retention` flags. #14640, #14643
-* [FEATURE] Promtool: Allow additional labels to be added to blocks created from openmetrics. #14402
-* [FEATURE] OTLP receiver: Add new option `otlp.promote_resource_attributes`, for any OTel resource attributes that should be promoted to metric labels. #14200
-* [FEATURE] Automatic reloading of the Prometheus configuration file at a specified interval #14769
-* [ENHANCEMENT] OTLP receiver: Warn when encountering exponential histograms with zero count and non-zero sum. #14706
-* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
-* [ENHANCEMENT] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815
-* [ENHANCEMENT] Scrape: Add support for logging scrape failures to a specified file. #14734
-* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
-* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
 * [ENHANCEMENT] Move AM discovery page from "Monitoring status" to "Server status". #14875
-* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
-* [ENHANCEMENT] Add support for multiple listening addresses. #14665
-* [ENHANCEMENT] Add the ability to set custom HTTP headers. #14817
-* [BUGFIX] TSDB: Fix shard initialization after WAL repair. #14731
-* [BUGFIX] UTF-8: Ensure correct validation when legacy mode turned on. #14736
-* [BUGFIX] SD: Make discovery manager notify consumers of dropped targets for still defined jobs. #13147
-* [BUGFIX] SD: Prevent the new service discovery manager from storing stale targets. #13622
-* [BUGFIX] Remote Write 2.0: Ensure metadata records are sent from the WAL to remote write during WAL replay. #14766
-* [BUGFIX] Scrape: Do no override target parameter labels with config params. #11029
-* [BUGFIX] Scrape: Reset exemplar position when scraping histograms in protobuf. #14810
-* [BUGFIX] Native Histograms: Do not re-use spans between histograms. #14771
-* [BUGFIX] Scrape: Only parse created timestamp if `created-timestamp-zero-ingestion` feature flag is enabled. This is as a lot of memory is used when parsing the created timestamp in the OM text format. #14815
-* [BUGFIX] TSDB: Fix panic in query during truncation with OOO head. #14831
-* [BUGFIX] TSDB: Fix panic in chunk querier. #14874
-* [BUGFIX] promql.Engine.Close: No-op if nil. #14861
-* [BUGFIX] tsdb/wlog.Watcher.readSegmentForGC: Only count unknown record types against record_decode_failures_total metric. #14042
+* [BUGFIX] Scrape: Do not override target parameter labels with config params. #11029
+
+## 2.55.0-rc.0 / 2024-09-20
+
+* [FEATURE] Support UTF-8 characters in label names - feature flag `utf8-names`. #14482, #14880, #14736, #14727
+* [FEATURE] Support config reload automatically - feature flag `auto-reload-config`. #14769
+* [FEATURE] Scraping: Add the ability to set custom `http_headers` in config. #14817
+* [FEATURE] Scraping: Support feature flag `created-timestamp-zero-ingestion` in OpenMetrics. #14356, #14815
+* [FEATURE] Scraping: `scrape_failure_log_file` option to log failures to a file. #14734
+* [FEATURE] OTLP receiver: Optional promotion of resource attributes to series labels. #14200
+* [FEATURE] Remote-Write: Support Google Cloud Monitoring authorization. #14346
+* [FEATURE] Promtool: `tsdb create-blocks` new option to add labels. #14403
+* [FEATURE] Promtool: `promtool test` adds `--junit` flag to format results. #14506
+* [ENHANCEMENT] OTLP receiver: Warn on exponential histograms with zero count and non-zero sum. #14706
+* [ENHANCEMENT] OTLP receiver: Interrupt translation on context cancellation/timeout. #14612
+* [ENHANCEMENT] Remote Read client: Enable streaming remote read if the server supports it. #11379
+* [ENHANCEMENT] Remote-Write: Don't reshard if we haven't successfully sent a sample since last update. #14450
+* [ENHANCEMENT] PromQL: Delay deletion of `__name__` label to the end of the query evaluation. This is **experimental** and enabled under the feature-flag `promql-delayed-name-removal`. #14477
+* [ENHANCEMENT] PromQL: Experimental `sort_by_label` and `sort_by_label_desc` sort by all labels when label is equal. #14655
+* [ENHANCEMENT] PromQL: Clarify error message logged when Go runtime panic occurs during query evaluation. #14621
+* [ENHANCEMENT] PromQL: Use Kahan summation for better accuracy in `avg` and `avg_over_time`. #14413
+* [ENHANCEMENT] Tracing: Improve PromQL tracing, including showing the operation performed for aggregates, operators, and calls. #14816
+* [ENHANCEMENT] API: Support multiple listening addresses. #14665
+* [ENHANCEMENT] TSDB: Backward compatibility with upcoming index v3. #14934
+* [PERF] TSDB: Query in-order and out-of-order series together. #14354, #14693, #14714, #14831, #14874, #14948
+* [PERF] TSDB: Streamline reading of overlapping out-of-order head chunks. #14729
+* [BUGFIX] SD: Fix dropping targets (with feature flag `new-service-discovery-manager`). #13147
+* [BUGFIX] SD: Stop storing stale targets (with feature flag `new-service-discovery-manager`). #13622
+* [BUGFIX] Scraping: exemplars could be dropped in protobuf scraping. #14810
+* [BUGFIX] Remote-Write: fix metadata sending for experimental Remote-Write V2. #14766
+* [BUGFIX] Remote-Write: Return 4xx not 5xx when timeseries has duplicate label. #14716
+* [BUGFIX] Experimental Native Histograms: many fixes for incorrect results, panics, warnings. #14513, #14575, #14598, #14609, #14611, #14771, #14821
+* [BUGFIX] TSDB: Only count unknown record types in `record_decode_failures_total` metric. #14042
 
 ## 2.54.1 / 2024-08-27
 
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -2,7 +2,6 @@
 
 General maintainers:
 * Bryan Boreham (bjboreham@gmail.com / @bboreham)
-* Levi Harrison (levi@leviharrison.dev / @LeviHarrison)
 * Ayoub Mrini (ayoubmrini424@gmail.com / @machine424)
 * Julien Pivotto (roidelapluie@prometheus.io / @roidelapluie)
 
@@ -17,7 +16,7 @@ Maintainers for specific parts of the codebase:
 George Krajcsovits (<gyorgy.krajcsovits@grafana.com> / @krajorama)
 * `storage`
   * `remote`: Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (tom.wilkie@gmail.com / @tomwilkie), Nicolás Pazos ( <npazosmendez@gmail.com> / @npazosmendez), Alex Greenbank ( <alex.greenbank@grafana.com> / @alexgreenbank)
-* `otlptranslator`: Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
+* `otlptranslator`: Arthur Silva Sens (<arthursens2005@gmail.com> / @ArthurSens), Arve Knudsen (<arve.knudsen@gmail.com> / @aknuds1), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Jesús Vázquez (<jesus.vazquez@grafana.com> / @jesusvazquez)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -59,6 +59,7 @@ Release cadence of first pre-releases being cut is 6 weeks.
 | v2.52 | 2024-04-22 | Arthur Silva Sens (GitHub: @ArthurSens) |
 | v2.53 LTS | 2024-06-03 | George Krajcsovits (GitHub: @krajorama) |
 | v2.54 | 2024-07-17 | Bryan Boreham (GitHub: @bboreham) |
+| v2.55 | 2024-09-17 | Bryan Boreham (GitHub: @bboreham) |
 
 If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice.
--- a/cmd/prometheus/main.go
+++ b/cmd/prometheus/main.go
@@ -18,11 +18,11 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"math"
 	"math/bits"
 	"net"
 	"net/http"
-	_ "net/http/pprof" // Comment this line to disable pprof endpoint.
 	"net/url"
 	"os"
 	"os/signal"
@@ -38,8 +38,6 @@ import (
 	"github.com/KimMachineGun/automemlimit/memlimit"
 	"github.com/alecthomas/kingpin/v2"
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/grafana/regexp"
 	"github.com/mwitkow/go-conntrack"
 	"github.com/oklog/run"
@@ -47,8 +45,8 @@ import (
 	"github.com/prometheus/client_golang/prometheus/collectors"
 	versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/common/promlog"
-	promlogflag "github.com/prometheus/common/promlog/flag"
+	"github.com/prometheus/common/promslog"
+	promslogflag "github.com/prometheus/common/promslog/flag"
 	"github.com/prometheus/common/version"
 	toolkit_web "github.com/prometheus/exporter-toolkit/web"
 	"go.uber.org/atomic"
@@ -77,10 +75,50 @@ import (
 	"github.com/prometheus/prometheus/tsdb/wlog"
 	"github.com/prometheus/prometheus/util/documentcli"
 	"github.com/prometheus/prometheus/util/logging"
+	"github.com/prometheus/prometheus/util/notifications"
 	prom_runtime "github.com/prometheus/prometheus/util/runtime"
 	"github.com/prometheus/prometheus/web"
 )
 
+// klogv1OutputCallDepth is the stack depth where we can find the origin of this call.
+const klogv1OutputCallDepth = 6
+
+// klogv1DefaultPrefixLength is the length of the log prefix that we have to strip out.
+const klogv1DefaultPrefixLength = 53
+
+// klogv1Writer is used in SetOutputBySeverity call below to redirect any calls
+// to klogv1 to end up in klogv2.
+// This is a hack to support klogv1 without use of go-kit/log. It is inspired
+// by klog's upstream klogv1/v2 coexistence example:
+// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go
+type klogv1Writer struct{}
+
+// Write redirects klogv1 calls to klogv2.
+// This is a hack to support klogv1 without use of go-kit/log. It is inspired
+// by klog's upstream klogv1/v2 coexistence example:
+// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go
+func (kw klogv1Writer) Write(p []byte) (n int, err error) {
+	if len(p) < klogv1DefaultPrefixLength {
+		klogv2.InfoDepth(klogv1OutputCallDepth, string(p))
+		return len(p), nil
+	}
+
+	switch p[0] {
+	case 'I':
+		klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+	case 'W':
+		klogv2.WarningDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+	case 'E':
+		klogv2.ErrorDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+	case 'F':
+		klogv2.FatalDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+	default:
+		klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+	}
+
+	return len(p), nil
+}
+
 var (
 	appName = "prometheus"
 
@@ -153,6 +191,7 @@ type flagConfig struct {
 	queryMaxSamples     int
 	RemoteFlushDeadline model.Duration
 	nameEscapingScheme  string
+	maxNotificationsSubscribers int
 
 	enableAutoReload   bool
 	autoReloadInterval model.Duration
@@ -170,88 +209,82 @@ type flagConfig struct {
 	prometheusURL   string
 	corsRegexString string
 
-	promlogConfig promlog.Config
-
 	promqlEnableDelayedNameRemoval bool
+
+	promslogConfig promslog.Config
 }
 
 // setFeatureListOptions sets the corresponding options from the featureList.
-func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
+func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
 	for _, f := range c.featureList {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "otlp-write-receiver":
-				c.web.EnableOTLPWriteReceiver = true
-				level.Info(logger).Log("msg", "Experimental OTLP write receiver enabled")
 			case "expand-external-labels":
 				c.enableExpandExternalLabels = true
-				level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
+				logger.Info("Experimental expand-external-labels enabled")
 			case "exemplar-storage":
 				c.tsdb.EnableExemplarStorage = true
-				level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled")
+				logger.Info("Experimental in-memory exemplar storage enabled")
 			case "memory-snapshot-on-shutdown":
 				c.tsdb.EnableMemorySnapshotOnShutdown = true
-				level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
+				logger.Info("Experimental memory snapshot on shutdown enabled")
 			case "extra-scrape-metrics":
 				c.scrape.ExtraMetrics = true
-				level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
+				logger.Info("Experimental additional scrape metrics enabled")
 			case "metadata-wal-records":
 				c.scrape.AppendMetadata = true
-				level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
+				logger.Info("Experimental metadata records in WAL enabled, required for remote write 2.0")
 			case "promql-per-step-stats":
 				c.enablePerStepStats = true
-				level.Info(logger).Log("msg", "Experimental per-step statistics reporting")
+				logger.Info("Experimental per-step statistics reporting")
 			case "auto-gomaxprocs":
 				c.enableAutoGOMAXPROCS = true
-				level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+				logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota")
 			case "auto-reload-config":
 				c.enableAutoReload = true
 				if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
 					c.autoReloadInterval, _ = model.ParseDuration("1s")
 				}
-				level.Info(logger).Log("msg", fmt.Sprintf("Enabled automatic configuration file reloading. Checking for configuration changes every %s.", c.autoReloadInterval))
+				logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
 			case "auto-gomemlimit":
 				c.enableAutoGOMEMLIMIT = true
-				level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
+				logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
 			case "concurrent-rule-eval":
 				c.enableConcurrentRuleEval = true
-				level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
-			case "no-default-scrape-port":
-				c.scrape.NoDefaultPort = true
-				level.Info(logger).Log("msg", "No default port will be appended to scrape targets' addresses.")
+				logger.Info("Experimental concurrent rule evaluation enabled.")
 			case "promql-experimental-functions":
 				parser.EnableExperimentalFunctions = true
-				level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
+				logger.Info("Experimental PromQL functions enabled.")
 			case "native-histograms":
 				c.tsdb.EnableNativeHistograms = true
 				c.scrape.EnableNativeHistogramsIngestion = true
 				// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
 				config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 				config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
-				level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+				logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
 			case "ooo-native-histograms":
 				c.tsdb.EnableOOONativeHistograms = true
-				level.Info(logger).Log("msg", "Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
+				logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
 			case "created-timestamp-zero-ingestion":
 				c.scrape.EnableCreatedTimestampZeroIngestion = true
 				// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
 				config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
 				config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
-				level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+				logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
 			case "delayed-compaction":
 				c.tsdb.EnableDelayedCompaction = true
-				level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+				logger.Info("Experimental delayed compaction is enabled.")
 			case "promql-delayed-name-removal":
 				c.promqlEnableDelayedNameRemoval = true
-				level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
+				logger.Info("Experimental PromQL delayed name removal enabled.")
 			case "":
 				continue
 			case "old-ui":
 				c.web.UseOldUI = true
-				level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.")
+				logger.Info("Serving previous version of the Prometheus web UI.")
 			default:
-				level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
+				logger.Warn("Unknown option for --enable-feature", "option", o)
 			}
 		}
 	}
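The hunk above is the heart of the migration: `promlog.Config` becomes `promslog.Config`, the logger parameter becomes `*slog.Logger`, and every `level.X(logger).Log("msg", ...)` call collapses into a leveled method call. A minimal standalone sketch of the before/after call style (illustrative values; the zero-value `promslog.Config` is assumed here to fall back to defaults):

```go
package main

import "github.com/prometheus/common/promslog"

func main() {
	// promslog.New builds a *log/slog.Logger from the same configuration
	// (level, format) that the old promlog package consumed.
	logger := promslog.New(&promslog.Config{})

	// go-kit style (before):
	//   level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
	// slog style (after): the message is positional, key-values follow.
	logger.Info("Experimental expand-external-labels enabled")
	logger.Warn("Unknown option for --enable-feature", "option", "foo")

	// Sub-component loggers: log.With(logger, "component", c) becomes:
	sub := logger.With("component", "example")
	sub.Info("started")
}
```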
@@ -285,7 +318,7 @@ func main() {
 			Registerer: prometheus.DefaultRegisterer,
 			Gatherer:   prometheus.DefaultGatherer,
 		},
-		promlogConfig: promlog.Config{},
+		promslogConfig: promslog.Config{},
 	}
 
 	a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)
@@ -318,6 +351,9 @@ func main() {
 	a.Flag("web.max-connections", "Maximum number of simultaneous connections across all listeners.").
 		Default("512").IntVar(&cfg.web.MaxConnections)
 
+	a.Flag("web.max-notifications-subscribers", "Limits the maximum number of subscribers that can concurrently receive live notifications. If the limit is reached, new subscription requests will be denied until existing connections close.").
+		Default("16").IntVar(&cfg.maxNotificationsSubscribers)
+
 	a.Flag("web.external-url",
 		"The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.").
 		PlaceHolder("<URL>").StringVar(&cfg.prometheusURL)
@@ -344,6 +380,9 @@ func main() {
 	a.Flag("web.remote-write-receiver.accepted-protobuf-messages", fmt.Sprintf("List of the remote write protobuf messages to accept when receiving the remote writes. Supported values: %v", supportedRemoteWriteProtoMsgs.String())).
 		Default(supportedRemoteWriteProtoMsgs.Strings()...).SetValue(rwProtoMsgFlagValue(&cfg.web.AcceptRemoteWriteProtoMsgs))
 
+	a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests.").
+		Default("false").BoolVar(&cfg.web.EnableOTLPWriteReceiver)
+
 	a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
 		Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
 
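The two added flags above follow the usual kingpin pattern used throughout this file. A self-contained sketch of that pattern (flag names and help text copied from the diff; the `cfg` struct from the real code is stubbed with local variables):

```go
package main

import (
	"fmt"
	"os"

	"github.com/alecthomas/kingpin/v2"
)

func main() {
	a := kingpin.New("prometheus", "The Prometheus monitoring server")

	// Stand-ins for cfg.maxNotificationsSubscribers and
	// cfg.web.EnableOTLPWriteReceiver in the real code.
	var maxSubscribers int
	var enableOTLP bool

	a.Flag("web.max-notifications-subscribers",
		"Limits the maximum number of subscribers that can concurrently receive live notifications.").
		Default("16").IntVar(&maxSubscribers)

	a.Flag("web.enable-otlp-receiver", "Enable API endpoint accepting OTLP write requests.").
		Default("false").BoolVar(&enableOTLP)

	kingpin.MustParse(a.Parse(os.Args[1:]))
	fmt.Println("max subscribers:", maxSubscribers, "otlp receiver:", enableOTLP)
}
```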
@@ -383,6 +422,9 @@ func main() {
 	serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
 		Default("false").BoolVar(&cfg.tsdb.NoLockfile)
 
+	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
+		Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
+
 	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
 
@@ -474,12 +516,12 @@ func main() {
 	a.Flag("scrape.discovery-reload-interval", "Interval used by scrape manager to throttle target groups updates.").
 		Hidden().Default("5s").SetValue(&cfg.scrape.DiscoveryReloadInterval)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, no-default-scrape-port, native-histograms, otlp-write-receiver, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: auto-gomemlimit, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-per-step-stats, promql-experimental-functions, extra-scrape-metrics, auto-gomaxprocs, native-histograms, created-timestamp-zero-ingestion, concurrent-rule-eval, delayed-compaction, old-ui. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
 
-	promlogflag.AddFlags(a, &cfg.promlogConfig)
+	promslogflag.AddFlags(a, &cfg.promslogConfig)
 
 	a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
 		if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil {
@@ -497,7 +539,13 @@ func main() {
 		os.Exit(2)
 	}
 
-	logger := promlog.New(&cfg.promlogConfig)
+	logger := promslog.New(&cfg.promslogConfig)
+	slog.SetDefault(logger)
+
+	notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer)
+	cfg.web.NotificationsSub = notifs.Sub
+	cfg.web.NotificationsGetter = notifs.Get
+	notifs.AddNotification(notifications.StartingUp)
 
 	if err := cfg.setFeatureListOptions(logger); err != nil {
 		fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing feature list: %w", err))
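The added block wires the new live-notifications hub into startup. A hedged sketch of that wiring in isolation; the `notifications` API shown here is inferred from the calls in this diff, not from separate documentation:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/util/notifications"
)

func main() {
	// Capacity mirrors the --web.max-notifications-subscribers default.
	notifs := notifications.NewNotifications(16, prometheus.DefaultRegisterer)

	// The server publishes lifecycle events; in main() the web handlers
	// receive notifs.Sub and notifs.Get to stream and list them.
	notifs.AddNotification(notifications.StartingUp)

	fmt.Printf("active notifications: %v\n", notifs.Get())
}
```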
@@ -547,12 +595,12 @@ func main() {
 
 	// Throw error for invalid config before starting other components.
 	var cfgFile *config.Config
-	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
+	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil {
 		absPath, pathErr := filepath.Abs(cfg.configFile)
 		if pathErr != nil {
 			absPath = cfg.configFile
 		}
-		level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
+		logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
 		os.Exit(2)
 	}
 	if _, err := cfgFile.GetScrapeConfigs(); err != nil {
@@ -560,7 +608,7 @@ func main() {
 		if pathErr != nil {
 			absPath = cfg.configFile
 		}
-		level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
+		logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
 		os.Exit(2)
 	}
 	if cfg.tsdb.EnableExemplarStorage {
@@ -593,7 +641,7 @@ func main() {
 	if !agentMode {
 		if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
 			cfg.tsdb.RetentionDuration = defaultRetentionDuration
-			level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
+			logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
 		}
 
 		// Check for overflows. This limits our max retention to 100y.
@@ -603,7 +651,7 @@ func main() {
 			panic(err)
 		}
 		cfg.tsdb.RetentionDuration = y
-		level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String())
+		logger.Warn("Time retention value is too high. Limiting to: " + y.String())
 	}
 
 	// Max block size settings.
@@ -624,11 +672,8 @@ func main() {
 	noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
 	noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval)
 
-	// Above level 6, the k8s client would log bearer tokens in clear-text.
-	klog.ClampLevel(6)
-	klog.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
-	klogv2.ClampLevel(6)
-	klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
+	klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime"))
+	klog.SetOutputBySeverity("INFO", klogv1Writer{})
 
 	modeAppName := "Prometheus Server"
 	mode := "server"
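Together with the `klogv1Writer` added near the top of the file, these two lines route both generations of the Kubernetes client logger into slog: klog v2 gains a native slog bridge via `SetSlogLogger`, while klog v1, which only exposes an io.Writer hook, goes through the severity-prefix parser in `klogv1Writer.Write`. A standalone sketch of the same wiring, with the writer condensed for brevity:

```go
package main

import (
	"log/slog"

	klog "k8s.io/klog"
	klogv2 "k8s.io/klog/v2"
)

// minimalKlogV1Writer condenses the klogv1Writer from the diff: klog v1
// lines start with a severity byte (I/W/E/F) followed by a fixed-width
// prefix, which the full version strips before dispatching to klog v2.
type minimalKlogV1Writer struct{}

func (minimalKlogV1Writer) Write(p []byte) (int, error) {
	klogv2.InfoDepth(6, string(p)) // severity dispatch elided for brevity
	return len(p), nil
}

func main() {
	logger := slog.Default().With("component", "k8s_client_runtime")

	klogv2.SetSlogLogger(logger)                            // klog v2 -> slog
	klog.SetOutputBySeverity("INFO", minimalKlogV1Writer{}) // klog v1 -> v2 -> slog
}
```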
@@ -637,20 +682,22 @@ func main() {
 		mode = "agent"
 	}
 
-	level.Info(logger).Log("msg", "Starting "+modeAppName, "mode", mode, "version", version.Info())
+	logger.Info("Starting "+modeAppName, "mode", mode, "version", version.Info())
 	if bits.UintSize < 64 {
-		level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
+		logger.Warn("This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
 	}
-	level.Info(logger).Log("build_context", version.BuildContext())
-	level.Info(logger).Log("host_details", prom_runtime.Uname())
-	level.Info(logger).Log("fd_limits", prom_runtime.FdLimits())
-	level.Info(logger).Log("vm_limits", prom_runtime.VMLimits())
+	logger.Info("operational information",
+		"build_context", version.BuildContext(),
+		"host_details", prom_runtime.Uname(),
+		"fd_limits", prom_runtime.FdLimits(),
+		"vm_limits", prom_runtime.VMLimits(),
+	)
 
 	var (
 		localStorage  = &readyStorage{stats: tsdb.NewDBStats()}
 		scraper       = &readyScrapeManager{}
-		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
+		remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
 		fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
 	)
 
||||||
|
@ -658,7 +705,7 @@ func main() {
|
||||||
ctxWeb, cancelWeb = context.WithCancel(context.Background())
|
ctxWeb, cancelWeb = context.WithCancel(context.Background())
|
||||||
ctxRule = context.Background()
|
ctxRule = context.Background()
|
||||||
|
|
||||||
notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
|
notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier"))
|
||||||
|
|
||||||
ctxScrape, cancelScrape = context.WithCancel(context.Background())
|
ctxScrape, cancelScrape = context.WithCancel(context.Background())
|
||||||
ctxNotify, cancelNotify = context.WithCancel(context.Background())
|
ctxNotify, cancelNotify = context.WithCancel(context.Background())
|
||||||
|
@@ -673,37 +720,37 @@ func main() {
 	// they are not specific to an SD instance.
 	err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer)
 	if err != nil {
-		level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
+		logger.Error("failed to register Kubernetes client metrics", "err", err)
 		os.Exit(1)
 	}
 
 	sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
 	if err != nil {
-		level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
+		logger.Error("failed to register service discovery metrics", "err", err)
 		os.Exit(1)
 	}
 
-	discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
+	discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
 	if discoveryManagerScrape == nil {
-		level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+		logger.Error("failed to create a discovery manager scrape")
 		os.Exit(1)
 	}
 
-	discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
+	discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
 	if discoveryManagerNotify == nil {
-		level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+		logger.Error("failed to create a discovery manager notify")
 		os.Exit(1)
 	}
 
 	scrapeManager, err := scrape.NewManager(
 		&cfg.scrape,
-		log.With(logger, "component", "scrape manager"),
-		func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) },
+		logger.With("component", "scrape manager"),
+		logging.NewJSONFileLogger,
 		fanoutStorage,
 		prometheus.DefaultRegisterer,
 	)
 	if err != nil {
-		level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err)
+		logger.Error("failed to create a scrape manager", "err", err)
 		os.Exit(1)
 	}
 
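slog derives child loggers the same way go-kit's log.With did, but as a method on the logger itself: each With call returns a new *slog.Logger whose attributes ride on every record it emits. Note also that logging.NewJSONFileLogger can now be passed directly because its signature matches what scrape.NewManager expects, so the adapter closure is gone. A tiny sketch (the variable name is illustrative):

    // Every record from sdLogger carries component="discovery manager scrape".
    sdLogger := logger.With("component", "discovery manager scrape")
    sdLogger.Info("discovery manager started")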
@@ -716,10 +763,10 @@ func main() {
 
 	if cfg.enableAutoGOMAXPROCS {
 		l := func(format string, a ...interface{}) {
-			level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...))
+			logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
 		}
 		if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
-			level.Warn(logger).Log("component", "automaxprocs", "msg", "Failed to set GOMAXPROCS automatically", "err", err)
+			logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
 		}
 	}
 
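automaxprocs only accepts a printf-style function, so the hunk above reshapes its output into a single structured record. A self-contained sketch of the same adapter, assuming only the import paths that appear in this diff (everything else is illustrative):

    package main

    import (
        "fmt"
        "log/slog"
        "os"
        "strings"

        "go.uber.org/automaxprocs/maxprocs"
    )

    func main() {
        logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
        // Strip the library's "maxprocs: " prefix and log the rest as the message.
        printf := func(format string, a ...interface{}) {
            logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
        }
        if _, err := maxprocs.Set(maxprocs.Logger(printf)); err != nil {
            logger.Warn("Failed to set GOMAXPROCS automatically", "err", err)
        }
    }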
@@ -733,17 +780,17 @@ func main() {
 				),
 			),
 		); err != nil {
-			level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
+			logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
 		}
 	}
 
 	if !agentMode {
 		opts := promql.EngineOpts{
-			Logger:                   log.With(logger, "component", "query engine"),
+			Logger:                   logger.With("component", "query engine"),
 			Reg:                      prometheus.DefaultRegisterer,
 			MaxSamples:               cfg.queryMaxSamples,
 			Timeout:                  time.Duration(cfg.queryTimeout),
-			ActiveQueryTracker:       promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")),
+			ActiveQueryTracker:       promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")),
 			LookbackDelta:            time.Duration(cfg.lookbackDelta),
 			NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
 			// EnableAtModifier and EnableNegativeOffset have to be
@@ -764,7 +811,7 @@ func main() {
 			Context:         ctxRule,
 			ExternalURL:     cfg.web.ExternalURL,
 			Registerer:      prometheus.DefaultRegisterer,
-			Logger:          log.With(logger, "component", "rule manager"),
+			Logger:          logger.With("component", "rule manager"),
 			OutageTolerance: time.Duration(cfg.outageTolerance),
 			ForGracePeriod:  time.Duration(cfg.forGracePeriod),
 			ResendDelay:     time.Duration(cfg.resendDelay),
@@ -815,7 +862,7 @@ func main() {
 	}
 
 	// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
-	webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
+	webHandler := web.New(logger.With("component", "web"), &cfg.web)
 
 	// Monitor outgoing connections on default transport with conntrack.
 	http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(
@@ -942,18 +989,18 @@ func main() {
 
 	listeners, err := webHandler.Listeners()
 	if err != nil {
-		level.Error(logger).Log("msg", "Unable to start web listeners", "err", err)
+		logger.Error("Unable to start web listener", "err", err)
 		if err := queryEngine.Close(); err != nil {
-			level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+			logger.Warn("Closing query engine failed", "err", err)
 		}
 		os.Exit(1)
 	}
 
 	err = toolkit_web.Validate(*webConfig)
 	if err != nil {
-		level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err)
+		logger.Error("Unable to validate web configuration file", "err", err)
 		if err := queryEngine.Close(); err != nil {
-			level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+			logger.Warn("Closing query engine failed", "err", err)
 		}
 		os.Exit(1)
 	}
@@ -969,21 +1016,22 @@ func main() {
 				// Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
 				select {
 				case sig := <-term:
-					level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String())
+					logger.Warn("Received an OS signal, exiting gracefully...", "signal", sig.String())
 					reloadReady.Close()
 				case <-webHandler.Quit():
-					level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
+					logger.Warn("Received termination request via web service, exiting gracefully...")
 				case <-cancel:
 					reloadReady.Close()
 				}
 				if err := queryEngine.Close(); err != nil {
-					level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+					logger.Warn("Closing query engine failed", "err", err)
 				}
 				return nil
 			},
 			func(err error) {
 				close(cancel)
-				webHandler.SetReady(false)
+				webHandler.SetReady(web.Stopping)
+				notifs.AddNotification(notifications.ShuttingDown)
 			},
 		)
 	}
@@ -992,11 +1040,11 @@ func main() {
 		g.Add(
 			func() error {
 				err := discoveryManagerScrape.Run()
-				level.Info(logger).Log("msg", "Scrape discovery manager stopped")
+				logger.Info("Scrape discovery manager stopped")
 				return err
 			},
 			func(err error) {
-				level.Info(logger).Log("msg", "Stopping scrape discovery manager...")
+				logger.Info("Stopping scrape discovery manager...")
 				cancelScrape()
 			},
 		)
@@ -1006,11 +1054,11 @@ func main() {
 		g.Add(
 			func() error {
 				err := discoveryManagerNotify.Run()
-				level.Info(logger).Log("msg", "Notify discovery manager stopped")
+				logger.Info("Notify discovery manager stopped")
 				return err
 			},
 			func(err error) {
-				level.Info(logger).Log("msg", "Stopping notify discovery manager...")
+				logger.Info("Stopping notify discovery manager...")
 				cancelNotify()
 			},
 		)
@@ -1039,7 +1087,7 @@ func main() {
 				<-reloadReady.C
 
 				err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
-				level.Info(logger).Log("msg", "Scrape manager stopped")
+				logger.Info("Scrape manager stopped")
 				return err
 			},
 			func(err error) {
@@ -1047,7 +1095,7 @@ func main() {
 				// so that it doesn't try to write samples to a closed storage.
 				// We should also wait for rule manager to be fully stopped to ensure
 				// we don't trigger any false positive alerts for rules using absent().
-				level.Info(logger).Log("msg", "Stopping scrape manager...")
+				logger.Info("Stopping scrape manager...")
 				scrapeManager.Stop()
 			},
 		)
@@ -1078,10 +1126,18 @@ func main() {
 		if cfg.enableAutoReload {
 			checksum, err = config.GenerateChecksum(cfg.configFile)
 			if err != nil {
-				level.Error(logger).Log("msg", "Failed to generate initial checksum for configuration file", "err", err)
+				logger.Error("Failed to generate initial checksum for configuration file", "err", err)
 			}
 		}
 
+		callback := func(success bool) {
+			if success {
+				notifs.DeleteNotification(notifications.ConfigurationUnsuccessful)
+				return
+			}
+			notifs.AddNotification(notifications.ConfigurationUnsuccessful)
+		}
+
 		g.Add(
 			func() error {
 				<-reloadReady.C
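The new callback gives reloadConfig a way to report every reload outcome to the web UI's notification list. The same value is threaded through each reload path in this file, while the initial startup load passes a no-op since no UI is listening yet; both forms appear verbatim in this diff:

    // SIGHUP- and web-triggered reloads report through the real callback:
    if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels,
        cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
        logger.Error("Error reloading config", "err", err)
    }

    // The initial load passes a no-op callback:
    if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels,
        cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
        logger.Error("Error loading config", "err", err)
    }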
@@ -1089,18 +1145,18 @@ func main() {
 				for {
 					select {
 					case <-hup:
-						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
-							level.Error(logger).Log("msg", "Error reloading config", "err", err)
+						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+							logger.Error("Error reloading config", "err", err)
 						} else if cfg.enableAutoReload {
 							if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
 								checksum = currentChecksum
 							} else {
-								level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+								logger.Error("Failed to generate checksum during configuration reload", "err", err)
 							}
 						}
 					case rc := <-webHandler.Reload():
-						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
-							level.Error(logger).Log("msg", "Error reloading config", "err", err)
+						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+							logger.Error("Error reloading config", "err", err)
 							rc <- err
 						} else {
 							rc <- nil
@@ -1108,7 +1164,7 @@ func main() {
 							if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
 								checksum = currentChecksum
 							} else {
-								level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+								logger.Error("Failed to generate checksum during configuration reload", "err", err)
 							}
 						}
 					}
@@ -1118,16 +1174,14 @@ func main() {
 						}
 						currentChecksum, err := config.GenerateChecksum(cfg.configFile)
 						if err != nil {
-							level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+							logger.Error("Failed to generate checksum during configuration reload", "err", err)
+						} else if currentChecksum == checksum {
 							continue
 						}
-						if currentChecksum == checksum {
-							continue
-						}
-						level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.")
+						logger.Info("Configuration file change detected, reloading the configuration.")
 
-						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
-							level.Error(logger).Log("msg", "Error reloading config", "err", err)
+						if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
+							logger.Error("Error reloading config", "err", err)
 						} else {
 							checksum = currentChecksum
 						}
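The auto-reload loop keys off a checksum rather than file modification times: reload is skipped whenever the freshly computed value equals the stored one, so touching the file without changing its content stays cheap. config.GenerateChecksum is the helper this diff calls; as an illustration of the idea only — not Prometheus' actual implementation, which as I understand it also folds in rule and scrape-config files referenced by the main config — a minimal stand-in looks like this (assumes crypto/sha256, encoding/hex, and os are imported):

    // Illustrative only — a bare single-file checksum.
    func fileChecksum(path string) (string, error) {
        b, err := os.ReadFile(path)
        if err != nil {
            return "", err
        }
        sum := sha256.Sum256(b)
        return hex.EncodeToString(sum[:]), nil
    }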
@@ -1156,14 +1210,15 @@ func main() {
 					return nil
 				}
 
-				if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, reloaders...); err != nil {
+				if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, func(bool) {}, reloaders...); err != nil {
 					return fmt.Errorf("error loading config from %q: %w", cfg.configFile, err)
 				}
 
 				reloadReady.Close()
 
-				webHandler.SetReady(true)
-				level.Info(logger).Log("msg", "Server is ready to receive web requests.")
+				webHandler.SetReady(web.Ready)
+				notifs.DeleteNotification(notifications.StartingUp)
+				logger.Info("Server is ready to receive web requests.")
 				<-cancel
 				return nil
 			},
@@ -1178,7 +1233,7 @@ func main() {
 		cancel := make(chan struct{})
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "Starting TSDB ...")
+				logger.Info("Starting TSDB ...")
 				if cfg.tsdb.WALSegmentSize != 0 {
 					if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 {
 						return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB")
@@ -1197,13 +1252,13 @@ func main() {
 
 				switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
 				case "NFS_SUPER_MAGIC":
-					level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+					logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType)
 				default:
-					level.Info(logger).Log("fs_type", fsType)
+					logger.Info("filesystem information", "fs_type", fsType)
 				}
 
-				level.Info(logger).Log("msg", "TSDB started")
-				level.Debug(logger).Log("msg", "TSDB options",
+				logger.Info("TSDB started")
+				logger.Debug("TSDB options",
 					"MinBlockDuration", cfg.tsdb.MinBlockDuration,
 					"MaxBlockDuration", cfg.tsdb.MaxBlockDuration,
 					"MaxBytes", cfg.tsdb.MaxBytes,
@@ -1222,7 +1277,7 @@ func main() {
 			},
 			func(err error) {
 				if err := fanoutStorage.Close(); err != nil {
-					level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+					logger.Error("Error stopping storage", "err", err)
 				}
 				close(cancel)
 			},
@@ -1234,7 +1289,7 @@ func main() {
 		cancel := make(chan struct{})
 		g.Add(
 			func() error {
-				level.Info(logger).Log("msg", "Starting WAL storage ...")
+				logger.Info("Starting WAL storage ...")
 				if cfg.agent.WALSegmentSize != 0 {
 					if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 {
 						return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB")
@@ -1253,13 +1308,13 @@ func main() {
 
 				switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
 				case "NFS_SUPER_MAGIC":
-					level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+					logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
 				default:
-					level.Info(logger).Log("fs_type", fsType)
+					logger.Info(fsType)
 				}
 
-				level.Info(logger).Log("msg", "Agent WAL storage started")
-				level.Debug(logger).Log("msg", "Agent WAL storage options",
+				logger.Info("Agent WAL storage started")
+				logger.Debug("Agent WAL storage options",
 					"WALSegmentSize", cfg.agent.WALSegmentSize,
 					"WALCompression", cfg.agent.WALCompression,
 					"StripeSize", cfg.agent.StripeSize,
@@ -1277,7 +1332,7 @@ func main() {
 			},
 			func(e error) {
 				if err := fanoutStorage.Close(); err != nil {
-					level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+					logger.Error("Error stopping storage", "err", err)
 				}
 				close(cancel)
 			},
@@ -1311,7 +1366,7 @@ func main() {
 				<-reloadReady.C
 
 				notifierManager.Run(discoveryManagerNotify.SyncCh())
-				level.Info(logger).Log("msg", "Notifier manager stopped")
+				logger.Info("Notifier manager stopped")
 				return nil
 			},
 			func(err error) {
@@ -1320,16 +1375,16 @@ func main() {
 		)
 	}
 	if err := g.Run(); err != nil {
-		level.Error(logger).Log("err", err)
+		logger.Error("Error running goroutines from run.Group", "err", err)
 		os.Exit(1)
 	}
-	level.Info(logger).Log("msg", "See you next time!")
+	logger.Info("See you next time!")
 }
 
-func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) {
+func openDBWithMetrics(dir string, logger *slog.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) {
 	db, err := tsdb.Open(
 		dir,
-		log.With(logger, "component", "tsdb"),
+		logger.With("component", "tsdb"),
 		reg,
 		opts,
 		stats,
@@ -1382,17 +1437,19 @@ type reloader struct {
 	reloader func(*config.Config) error
 }
 
-func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
 	start := time.Now()
-	timings := []interface{}{}
-	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
+	timingsLogger := logger
+	logger.Info("Loading configuration file", "filename", filename)
 
 	defer func() {
 		if err == nil {
 			configSuccess.Set(1)
 			configSuccessTime.SetToCurrentTime()
+			callback(true)
 		} else {
 			configSuccess.Set(0)
+			callback(false)
 		}
 	}()
 
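Two idioms in this hunk are worth calling out. Per-reloader timings are now accumulated as attributes on a derived logger instead of a []interface{} slice spliced into one Log call, and because err is a named return value, the deferred block observes the final outcome and can drive both the success metric and the new callback in one place. A minimal sketch of the latter, under those assumptions:

    func doReload(callback func(bool)) (err error) {
        defer func() {
            // Runs after err has its final value.
            callback(err == nil)
        }()
        // ... apply configuration, possibly assigning err ...
        return nil
    }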
@@ -1411,10 +1468,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 	for _, rl := range rls {
 		rstart := time.Now()
 		if err := rl.reloader(conf); err != nil {
-			level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
+			logger.Error("Failed to apply configuration", "err", err)
 			failed = true
 		}
-		timings = append(timings, rl.name, time.Since(rstart))
+		timingsLogger = timingsLogger.With((rl.name), time.Since(rstart))
 	}
 	if failed {
 		return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
@@ -1422,7 +1479,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 
 	oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
 	if oldGoGC != conf.Runtime.GoGC {
-		level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
+		logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
 	}
 	// Write the new setting out to the ENV var for runtime API output.
 	if conf.Runtime.GoGC >= 0 {
@@ -1432,8 +1489,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
 	}
 
 	noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
-	l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
-	level.Info(logger).Log(append(l, timings...)...)
+	timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
 	return nil
 }
 
@@ -1599,6 +1655,10 @@ func (n notReadyAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels
 	return 0, tsdb.ErrNotReady
 }
 
+func (n notReadyAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) {
+	return 0, tsdb.ErrNotReady
+}
+
 func (n notReadyAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) {
 	return 0, tsdb.ErrNotReady
 }
@@ -31,9 +31,9 @@ import (
 	"time"
 
 	"github.com/alecthomas/kingpin/v2"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/config"
@@ -295,7 +295,7 @@ func TestTimeMetrics(t *testing.T) {
 	tmpDir := t.TempDir()
 
 	reg := prometheus.NewRegistry()
-	db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil)
+	db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil)
 	require.NoError(t, err)
 	defer func() {
 		require.NoError(t, db.Close())
@@ -125,12 +125,61 @@ func (p *queryLogTest) query(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, 200, r.StatusCode)
 	case ruleOrigin:
-		time.Sleep(2 * time.Second)
+		// Poll the /api/v1/rules endpoint until a new rule evaluation is detected.
+		var lastEvalTime time.Time
+		for {
+			r, err := http.Get(fmt.Sprintf("http://%s:%d/api/v1/rules", p.host, p.port))
+			require.NoError(t, err)
+
+			rulesBody, err := io.ReadAll(r.Body)
+			require.NoError(t, err)
+			defer r.Body.Close()
+
+			// Parse the rules response to find the last evaluation time.
+			newEvalTime := parseLastEvaluation(rulesBody)
+			if newEvalTime.After(lastEvalTime) {
+				if !lastEvalTime.IsZero() {
+					break
+				}
+				lastEvalTime = newEvalTime
+			}
+
+			time.Sleep(100 * time.Millisecond)
+		}
 	default:
 		panic("can't query this origin")
 	}
 }
 
+// parseLastEvaluation extracts the last evaluation timestamp from the /api/v1/rules response.
+func parseLastEvaluation(rulesBody []byte) time.Time {
+	var ruleResponse struct {
+		Status string `json:"status"`
+		Data   struct {
+			Groups []struct {
+				Rules []struct {
+					LastEvaluation string `json:"lastEvaluation"`
+				} `json:"rules"`
+			} `json:"groups"`
+		} `json:"data"`
+	}
+
+	err := json.Unmarshal(rulesBody, &ruleResponse)
+	if err != nil {
+		return time.Time{}
+	}
+
+	for _, group := range ruleResponse.Data.Groups {
+		for _, rule := range group.Rules {
+			if evalTime, err := time.Parse(time.RFC3339Nano, rule.LastEvaluation); err == nil {
+				return evalTime
+			}
+		}
+	}
+
+	return time.Time{}
+}
+
 // queryString returns the expected queryString of a this test.
 func (p *queryLogTest) queryString() string {
 	switch p.origin {
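For orientation, here is a hypothetical trimmed /api/v1/rules payload and what the new helper returns for it (the timestamp value is made up):

    body := []byte(`{"status":"success","data":{"groups":[{"rules":[{"lastEvaluation":"2024-09-17T10:00:00.123456789Z"}]}]}}`)
    ts := parseLastEvaluation(body)
    fmt.Println(ts.UTC()) // 2024-09-17 10:00:00.123456789 +0000 UTC

A malformed body or an unparsable timestamp yields the zero time.Time, which the polling loop above treats as "no evaluation seen yet".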
@@ -322,7 +371,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		require.Len(t, ql, qc)
 	} else {
-		require.Greater(t, len(ql), qc, "no queries logged")
+		require.GreaterOrEqual(t, len(ql), qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
 	qc = len(ql)
@@ -353,7 +402,7 @@ func (p *queryLogTest) run(t *testing.T) {
 	if p.exactQueryCount() {
 		require.Len(t, ql, qc)
 	} else {
-		require.Greater(t, len(ql), qc, "no queries logged")
+		require.GreaterOrEqual(t, len(ql), qc, "no queries logged")
 	}
 	p.validateLastQuery(t, ql)
 
@@ -393,6 +442,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine {
 	file, err := os.Open(path)
 	require.NoError(t, err)
+	defer file.Close()
 
 	scanner := bufio.NewScanner(file)
 	for scanner.Scan() {
 		var q queryLogLine
@@ -21,9 +21,10 @@ import (
 	"math"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/oklog/ulid"
 
+	"github.com/prometheus/common/promslog"
+
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/tsdb"
@@ -120,7 +121,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
 	// also need to append samples throughout the whole block range. To allow that, we
 	// pretend that the block is twice as large here, but only really add sample in the
 	// original interval later.
-	w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration)
+	w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration)
 	if err != nil {
 		return fmt.Errorf("block writer: %w", err)
 	}
@@ -32,13 +32,13 @@ import (
 	"time"
 
 	"github.com/alecthomas/kingpin/v2"
-	"github.com/go-kit/log"
 	"github.com/google/pprof/profile"
 	"github.com/prometheus/client_golang/api"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
 	config_util "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 	"github.com/prometheus/exporter-toolkit/web"
 	"gopkg.in/yaml.v2"
@@ -291,7 +291,7 @@ func main() {
 	promQLLabelsDeleteQuery := promQLLabelsDeleteCmd.Arg("query", "PromQL query.").Required().String()
 	promQLLabelsDeleteName := promQLLabelsDeleteCmd.Arg("name", "Name of the label to delete.").Required().String()
 
-	featureList := app.Flag("enable-feature", "Comma separated feature names to enable (only PromQL related and no-default-scrape-port). See https://prometheus.io/docs/prometheus/latest/feature_flags/ for the options and more details.").Default("").Strings()
+	featureList := app.Flag("enable-feature", "Comma separated feature names to enable. Currently unused.").Default("").Strings()
 
 	documentationCmd := app.Command("write-documentation", "Generate command line documentation. Internal use.").Hidden()
 
@@ -321,24 +321,21 @@ func main() {
 		}
 	}
 
-	var noDefaultScrapePort bool
 	for _, f := range *featureList {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "no-default-scrape-port":
-				noDefaultScrapePort = true
 			case "":
 				continue
 			default:
-				fmt.Printf("  WARNING: Unknown option for --enable-feature: %q\n", o)
+				fmt.Printf("  WARNING: --enable-feature is currently a no-op")
 			}
 		}
 	}
 
 	switch parsedCmd {
 	case sdCheckCmd.FullCommand():
-		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, noDefaultScrapePort, prometheus.DefaultRegisterer))
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout, prometheus.DefaultRegisterer))
 
 	case checkConfigCmd.FullCommand():
 		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, newLintConfig(*checkConfigLint, *checkConfigLintFatal), *configFiles...))
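promslog (github.com/prometheus/common/promslog) is the small factory these files now share: New returns a *slog.Logger, and a zero-value Config picks the package defaults (logging to stderr; the exact level and format defaults are promslog's own, not specified in this diff). NewNopLogger is the silent variant the tests use:

    logger := promslog.New(&promslog.Config{})
    logger.Info("performing service discovery check", "files", sdConfigFiles)

    nop := promslog.NewNopLogger() // discards everything; handy in tests
    _ = nop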
@@ -578,7 +575,7 @@ func checkFileExists(fn string) error {
 func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
 	fmt.Println("Checking", filename)
 
-	cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger())
+	cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger())
 	if err != nil {
 		return nil, err
 	}
@@ -1185,7 +1182,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
 		return fmt.Errorf("new api client error: %w", err)
 	}
 
-	ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api)
+	ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api)
 	errs := ruleImporter.loadGroups(ctx, files)
 	for _, err := range errs {
 		if err != nil {
@@ -1219,7 +1216,7 @@ func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *c
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, tg := range targetGroups {
 		var failures []error
-		targets, failures = scrape.TargetsFromGroup(tg, scfg, false, targets, lb)
+		targets, failures = scrape.TargetsFromGroup(tg, scfg, targets, lb)
 		if len(failures) > 0 {
 			first := failures[0]
 			return first
@@ -146,7 +146,7 @@ func TestCheckSDFile(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			_, err := checkSDFile(test.file)
 			if test.err != "" {
-				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)
@@ -228,7 +228,7 @@ func TestCheckTargetConfig(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			_, err := checkConfig(false, "testdata/"+test.file, false)
 			if test.err != "" {
-				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)
@@ -315,7 +315,7 @@ func TestCheckConfigSyntax(t *testing.T) {
 				expectedErrMsg = test.errWindows
 			}
 			if expectedErrMsg != "" {
-				require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)
@@ -345,7 +345,7 @@ func TestAuthorizationConfig(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			_, err := checkConfig(false, "testdata/"+test.file, false)
 			if test.err != "" {
-				require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error())
+				require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
 				return
 			}
 			require.NoError(t, err)
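The testify swaps above are more than cosmetic: EqualErrorf and ErrorContains take the error value itself, so a nil error fails the assertion cleanly instead of panicking inside err.Error(). A sketch of the before/after shape:

    // Before: panics if err is nil.
    require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())

    // After: nil-safe, and the intent is explicit.
    require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err)
    require.ErrorContains(t, err, test.err)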
@@ -16,12 +16,12 @@ package main
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/model/timestamp"
@@ -38,7 +38,7 @@ type queryRangeAPI interface {
 }
 
 type ruleImporter struct {
-	logger log.Logger
+	logger *slog.Logger
 	config ruleImporterConfig
 
 	apiClient queryRangeAPI
@@ -57,8 +57,8 @@ type ruleImporterConfig struct {
 
 // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series
 // written to disk in blocks.
-func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
-	level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
+func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
+	logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
 	return &ruleImporter{
 		logger: logger,
 		config: config,
@@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string)
 // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks.
 func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
 	for name, group := range importer.groups {
-		level.Info(importer.logger).Log("backfiller", "processing group", "name", name)
+		importer.logger.Info("processing group", "component", "backfiller", "name", name)
 
 		for i, r := range group.Rules() {
-			level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name())
+			importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name())
 			if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil {
 				errs = append(errs, err)
 			}
@@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 		return fmt.Errorf("query range: %w", err)
 	}
 	if warnings != nil {
-		level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings)
+		importer.logger.Warn("Range query returned warnings.", "warnings", warnings)
 	}
 
 	// To prevent races with compaction, a block writer only allows appending samples
@@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
 	// also need to append samples throughout the whole block range. To allow that, we
 	// pretend that the block is twice as large here, but only really add sample in the
 	// original interval later.
-	w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
+	w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
 	if err != nil {
 		return fmt.Errorf("new block writer: %w", err)
 	}
@@ -21,9 +21,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/model/labels"
@@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 }
 
 func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
-	logger := log.NewNopLogger()
+	logger := promslog.NewNopLogger()
 	cfg := ruleImporterConfig{
 		outputDir: tmpDir,
 		start:     start.Add(-10 * time.Hour),
@@ -20,9 +20,9 @@ import (
 	"os"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/google/go-cmp/cmp"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
@@ -38,8 +38,8 @@ type sdCheckResult struct {
 }
 
 // CheckSD performs service discovery for the given job name and reports the results.
-func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, noDefaultScrapePort bool, registerer prometheus.Registerer) int {
-	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
+	logger := promslog.New(&promslog.Config{})
 
 	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
 	if err != nil {
@@ -114,7 +114,7 @@ outerLoop:
 	}
 	results := []sdCheckResult{}
 	for _, tgs := range sdCheckResults {
-		results = append(results, getSDCheckResult(tgs, scrapeConfig, noDefaultScrapePort)...)
+		results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
 	}
 
 	res, err := json.MarshalIndent(results, "", " ")
@@ -127,7 +127,7 @@ outerLoop:
 	return successExitCode
 }
 
-func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig, noDefaultScrapePort bool) []sdCheckResult {
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
 	sdCheckResults := []sdCheckResult{}
 	lb := labels.NewBuilder(labels.EmptyLabels())
 	for _, targetGroup := range targetGroups {
@@ -144,7 +144,7 @@ func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.Sc
 		}
 	}
 
-	res, orig, err := scrape.PopulateLabels(lb, scrapeConfig, noDefaultScrapePort)
+	res, orig, err := scrape.PopulateLabels(lb, scrapeConfig)
 	result := sdCheckResult{
 		DiscoveredLabels: orig,
 		Labels:           res,
@@ -70,5 +70,5 @@ func TestSDCheckResult(t *testing.T) {
 		},
 	}
 
-	testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig, true))
+	testutil.RequireEqual(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
 }
4
cmd/promtool/testdata/unittest.yml
vendored
@@ -69,13 +69,13 @@ tests:
           eval_time: 2m
           exp_samples:
             - labels: "test_histogram_repeat"
-              histogram: "{{count:2 sum:3 buckets:[2]}}"
+              histogram: "{{count:2 sum:3 counter_reset_hint:not_reset buckets:[2]}}"
 
         - expr: test_histogram_increase
           eval_time: 2m
           exp_samples:
             - labels: "test_histogram_increase"
-              histogram: "{{count:4 sum:5.6 buckets:[4]}}"
+              histogram: "{{count:4 sum:5.6 counter_reset_hint:not_reset buckets:[4]}}"
 
       # Ensure a value is stale as soon as it is marked as such.
       - expr: test_stale
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -32,9 +33,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"go.uber.org/atomic"
 
+	"github.com/prometheus/common/promslog"
+
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
@@ -60,7 +62,7 @@ type writeBenchmark struct {
 	memprof   *os.File
 	blockprof *os.File
 	mtxprof   *os.File
-	logger    log.Logger
+	logger    *slog.Logger
 }
 
 func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error {
@@ -68,7 +70,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 		outPath:     outPath,
 		samplesFile: samplesFile,
 		numMetrics:  numMetrics,
-		logger:      log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)),
+		logger:      promslog.New(&promslog.Config{}),
 	}
 	if b.outPath == "" {
 		dir, err := os.MkdirTemp("", "tsdb_bench")
@@ -87,9 +89,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 
 	dir := filepath.Join(b.outPath, "storage")
 
-	l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-
-	st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
+	st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{
 		RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
 		MinBlockDuration:  int64(2 * time.Hour / time.Millisecond),
 	}, tsdb.NewDBStats())

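The hunk above shows the migration pattern that repeats through the rest of this commit: `github.com/go-kit/log` is dropped in favor of the standard library's `log/slog`, with `github.com/prometheus/common/promslog` supplying the constructors. A minimal before/after sketch (the `component` key/value below is illustrative, not taken from the diff):

```go
package main

import "github.com/prometheus/common/promslog"

func main() {
	// Replaces log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)):
	// promslog.New returns a *slog.Logger with Prometheus' default handler setup.
	logger := promslog.New(&promslog.Config{})

	// go-kit: level.Warn(logger).Log("msg", "Empty environment variable", "name", s)
	// slog:   the message is positional, remaining args are key/value pairs.
	logger.Warn("Empty environment variable", "name", "TEST")

	// go-kit: logger = log.With(logger, "component", "bench")
	logger = logger.With("component", "bench")
	logger.Info("starting")

	// Tests swap log.NewNopLogger() for promslog.NewNopLogger().
	_ = promslog.NewNopLogger()
}
```
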
@@ -26,13 +26,13 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/google/go-cmp/cmp"
 	"github.com/grafana/regexp"
 	"github.com/nsf/jsondiff"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -218,7 +218,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 		Appendable: suite.Storage(),
 		Context:    context.Background(),
 		NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
-		Logger:     log.NewNopLogger(),
+		Logger:     promslog.NewNopLogger(),
 	}
 	m := rules.NewManager(opts)
 	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)

@@ -16,6 +16,7 @@ package config
 import (
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -25,8 +26,6 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -73,7 +72,7 @@ const (
 )
 
 // Load parses the YAML input s into a Config.
-func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
 	cfg := &Config{}
 	// If the entire config body is empty the UnmarshalYAML method is
 	// never called. We thus have to set the DefaultConfig at the entry
@@ -98,11 +97,11 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 			if v := os.Getenv(s); v != "" {
 				return v
 			}
-			level.Warn(logger).Log("msg", "Empty environment variable", "name", s)
+			logger.Warn("Empty environment variable", "name", s)
 			return ""
 		})
 		if newV != v.Value {
-			level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
+			logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV)
 		}
 		// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
 		b.Add(v.Name, newV)
@@ -112,7 +111,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 }
 
 // LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
 	content, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err

@@ -24,10 +24,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -1501,7 +1501,7 @@ var expectedConf = &Config{
 }
 
 func TestYAMLRoundtrip(t *testing.T) {
-	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger())
+	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -1514,7 +1514,7 @@ func TestYAMLRoundtrip(t *testing.T) {
 }
 
 func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
-	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger())
+	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -1529,7 +1529,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 
 func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 	t.Run("good config", func(t *testing.T) {
-		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
+		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger())
 		require.NoError(t, err)
 
 		out, err := yaml.Marshal(want)
@@ -1541,7 +1541,7 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 	})
 
 	t.Run("bad config", func(t *testing.T) {
-		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
+		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger())
 		require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
 		require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
 	})
@@ -1550,16 +1550,16 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 func TestLoadConfig(t *testing.T) {
 	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
 	// an overwritten default field in the global config permanently changes the default.
-	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
-	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, expectedConf, c)
 }
 
 func TestScrapeIntervalLarger(t *testing.T) {
-	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Len(t, c.ScrapeConfigs, 1)
 	for _, sc := range c.ScrapeConfigs {
@@ -1569,7 +1569,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
 
 // YAML marshaling must not reveal authentication credentials.
 func TestElideSecrets(t *testing.T) {
-	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@@ -1586,31 +1586,31 @@ func TestElideSecrets(t *testing.T) {
 
 func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
 	// Parse a valid file that sets a rule files with an absolute path
-	c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger())
+	c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, ruleFilesExpectedConf, c)
 }
 
 func TestKubernetesEmptyAPIServer(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesWithKubeConfig(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesSelectors(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
@@ -2094,9 +2094,8 @@ func TestBadConfigs(t *testing.T) {
 		model.NameValidationScheme = model.UTF8Validation
 	}()
 	for _, ee := range expectedErrors {
-		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
-		require.Error(t, err, "%s", ee.filename)
-		require.Contains(t, err.Error(), ee.errMsg,
+		_, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger())
+		require.ErrorContains(t, err, ee.errMsg,
 			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
 	}
 }
@@ -2126,7 +2125,7 @@ func TestBadStaticConfigsYML(t *testing.T) {
 }
 
 func TestEmptyConfig(t *testing.T) {
-	c, err := Load("", false, log.NewNopLogger())
+	c, err := Load("", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
 	require.Equal(t, exp, *c)
@@ -2136,38 +2135,38 @@ func TestExpandExternalLabels(t *testing.T) {
 	// Cleanup ant TEST env variable that could exist on the system.
 	os.Setenv("TEST", "")
 
-	c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
 
-	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
 
 	os.Setenv("TEST", "TestValue")
-	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
 }
 
 func TestAgentMode(t *testing.T) {
-	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
 
-	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Empty(t, c.RemoteWriteConfigs)
 
-	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
+	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Len(t, c.RemoteWriteConfigs, 1)
 	require.Equal(
@@ -2178,7 +2177,7 @@ func TestAgentMode(t *testing.T) {
 }
 
 func TestEmptyGlobalBlock(t *testing.T) {
-	c, err := Load("global:\n", false, log.NewNopLogger())
+	c, err := Load("global:\n", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
 	exp.Runtime = DefaultRuntimeConfig
@@ -2333,7 +2332,7 @@ func TestGetScrapeConfigs(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger())
+			c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger())
 			require.NoError(t, err)
 
 			scfgs, err := c.GetScrapeConfigs()
@@ -2351,7 +2350,7 @@ func kubernetesSDHostURL() config.URL {
 }
 
 func TestScrapeConfigDisableCompression(t *testing.T) {
-	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger())
+	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -2398,7 +2397,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
 
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger())
+			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger())
 			require.NoError(t, err)
 
 			out, err := yaml.Marshal(want)

@@ -233,7 +233,7 @@ type Config interface {
 }
 
 type DiscovererOptions struct {
-	Logger log.Logger
+	Logger *slog.Logger
 
 	// A registerer for the Discoverer's metrics.
 	Registerer prometheus.Registerer

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
@@ -29,11 +30,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -146,7 +146,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // the Discoverer interface.
 type EC2Discovery struct {
 	*refresh.Discovery
-	logger log.Logger
+	logger *slog.Logger
 	cfg    *EC2SDConfig
 	ec2    *ec2.EC2
 
@@ -157,14 +157,14 @@ type EC2Discovery struct {
 }
 
 // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
-func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
+func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
 	m, ok := metrics.(*ec2Metrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	d := &EC2Discovery{
 		logger: logger,
@@ -254,8 +254,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 	// Prometheus requires a reload if AWS adds a new AZ to the region.
 	if d.azToAZID == nil {
 		if err := d.refreshAZIDs(ctx); err != nil {
-			level.Debug(d.logger).Log(
-				"msg", "Unable to describe availability zones",
+			d.logger.Debug(
+				"Unable to describe availability zones",
 				"err", err)
 		}
 	}
@@ -296,8 +296,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 		labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
 		azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
 		if !ok && d.azToAZID != nil {
-			level.Debug(d.logger).Log(
-				"msg", "Availability zone ID not found",
+			d.logger.Debug(
+				"Availability zone ID not found",
 				"az", *inst.Placement.AvailabilityZone)
 		}
 		labels[ec2LabelAZID] = model.LabelValue(azID)

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
@@ -29,10 +30,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/lightsail"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -130,14 +131,14 @@ type LightsailDiscovery struct {
 }
 
 // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
+func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
 	m, ok := metrics.(*lightsailMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	d := &LightsailDiscovery{

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"math/rand"
 	"net"
 	"net/http"
@@ -35,10 +36,9 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
 	cache "github.com/Code-Hex/go-generics-cache"
 	"github.com/Code-Hex/go-generics-cache/policy/lru"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -175,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 type Discovery struct {
 	*refresh.Discovery
-	logger log.Logger
+	logger *slog.Logger
 	cfg    *SDConfig
 	port   int
 	cache  *cache.Cache[string, *armnetwork.Interface]
@@ -183,14 +183,14 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
-func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*azureMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
 	d := &Discovery{
@@ -228,13 +228,13 @@ type azureClient struct {
 	vm     *armcompute.VirtualMachinesClient
 	vmss   *armcompute.VirtualMachineScaleSetsClient
 	vmssvm *armcompute.VirtualMachineScaleSetVMsClient
-	logger log.Logger
+	logger *slog.Logger
 }
 
 var _ client = &azureClient{}
 
 // createAzureClient is a helper function for creating an Azure compute client to ARM.
-func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
+func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) {
 	cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
 	if err != nil {
 		return &azureClient{}, err
@@ -337,21 +337,21 @@ type virtualMachine struct {
 }
 
 // Create a new azureResource object from an ID string.
-func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) {
+func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) {
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	resourceID, err := arm.ParseResourceID(id)
 	if err != nil {
 		err := fmt.Errorf("invalid ID '%s': %w", id, err)
-		level.Error(logger).Log("err", err)
+		logger.Error("Failed to parse resource ID", "err", err)
 		return &arm.ResourceID{}, err
 	}
 	return resourceID, nil
 }
 
 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
-	defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
+	defer d.logger.Debug("Azure discovery completed")
 
 	client, err := createAzureClient(*d.cfg, d.logger)
 	if err != nil {
@@ -365,7 +365,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		return nil, fmt.Errorf("could not get virtual machines: %w", err)
 	}
 
-	level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines))
+	d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines))
 
 	// Load the vms managed by scale sets.
 	scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
@@ -459,7 +459,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 	}
 	if err != nil {
 		if errors.Is(err, errorNotFound) {
-			level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
+			d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
 		} else {
 			return nil, err
 		}
@@ -480,7 +480,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 	// yet support this. On deallocated machines, this value happens to be nil so it
 	// is a cheap and easy way to determine if a machine is allocated or not.
 	if networkInterface.Properties.Primary == nil {
-		level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
+		d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name)
 		return nil, nil
 	}
 
@@ -724,7 +724,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) {
 	rs := time.Duration(random) * time.Second
 	exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
 	d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
-	level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds())
+	d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds())
 }
 
 // getFromCache will get the network Interface for the specified nicID

@@ -23,7 +23,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
 	cache "github.com/Code-Hex/go-generics-cache"
 	"github.com/Code-Hex/go-generics-cache/policy/lru"
-	"github.com/go-kit/log"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )
@@ -150,7 +150,7 @@ func TestVMToLabelSet(t *testing.T) {
 	cfg := DefaultSDConfig
 	d := &Discovery{
 		cfg:    &cfg,
-		logger: log.NewNopLogger(),
+		logger: promslog.NewNopLogger(),
 		cache:  cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
 	}
 	network := armnetwork.Interface{

@@ -17,17 +17,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	consul "github.com/hashicorp/consul/api"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -113,8 +113,11 @@ type SDConfig struct {
 	Services []string `yaml:"services,omitempty"`
 	// A list of tags used to filter instances inside a service. Services must contain all tags in the list.
 	ServiceTags []string `yaml:"tags,omitempty"`
-	// Desired node metadata.
+	// Desired node metadata. As of Consul 1.14, consider `filter` instead.
 	NodeMeta map[string]string `yaml:"node_meta,omitempty"`
+	// Consul filter string
+	// See https://www.consul.io/api-docs/catalog#filtering-1, for syntax
+	Filter string `yaml:"filter,omitempty"`
 
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }
@@ -174,22 +177,23 @@ type Discovery struct {
 	watchedServices []string // Set of services which will be discovered.
 	watchedTags     []string // Tags used to filter instances of a service.
 	watchedNodeMeta map[string]string
+	watchedFilter   string
 	allowStale      bool
 	refreshInterval time.Duration
 	finalizer       func()
-	logger          log.Logger
+	logger          *slog.Logger
 	metrics         *consulMetrics
 }
 
 // NewDiscovery returns a new Discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*consulMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
@@ -218,6 +222,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
 		watchedServices: conf.Services,
 		watchedTags:     conf.ServiceTags,
 		watchedNodeMeta: conf.NodeMeta,
+		watchedFilter:   conf.Filter,
 		allowStale:      conf.AllowStale,
 		refreshInterval: time.Duration(conf.RefreshInterval),
 		clientDatacenter: conf.Datacenter,
@@ -282,7 +287,7 @@ func (d *Discovery) getDatacenter() error {
 
 	info, err := d.client.Agent().Self()
 	if err != nil {
-		level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
+		d.logger.Error("Error retrieving datacenter name", "err", err)
 		d.metrics.rpcFailuresCount.Inc()
 		return err
 	}
@@ -290,12 +295,12 @@ func (d *Discovery) getDatacenter() error {
 	dc, ok := info["Config"]["Datacenter"].(string)
 	if !ok {
 		err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
-		level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
+		d.logger.Error("Error retrieving datacenter name", "err", err)
 		return err
 	}
 
 	d.clientDatacenter = dc
-	d.logger = log.With(d.logger, "datacenter", dc)
+	d.logger = d.logger.With("datacenter", dc)
 	return nil
 }
 
@@ -361,13 +366,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 // entire list of services.
 func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) {
 	catalog := d.client.Catalog()
-	level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
+	d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter)
 
 	opts := &consul.QueryOptions{
 		WaitIndex:  *lastIndex,
 		WaitTime:   watchTimeout,
 		AllowStale: d.allowStale,
 		NodeMeta:   d.watchedNodeMeta,
+		Filter:     d.watchedFilter,
 	}
 	t0 := time.Now()
 	srvs, meta, err := catalog.Services(opts.WithContext(ctx))
@@ -382,7 +388,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
 	}
 
 	if err != nil {
-		level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
+		d.logger.Error("Error refreshing service list", "err", err)
 		d.metrics.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return
@@ -445,7 +451,7 @@ type consulService struct {
 	discovery          *Discovery
 	client             *consul.Client
 	tagSeparator       string
-	logger             log.Logger
+	logger             *slog.Logger
 	rpcFailuresCount   prometheus.Counter
 	serviceRPCDuration prometheus.Observer
 }
@@ -490,7 +496,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
 
 // Get updates for a service.
 func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) {
-	level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
+	srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
 
 	opts := &consul.QueryOptions{
 		WaitIndex: *lastIndex,
@@ -513,7 +519,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	}
 
 	if err != nil {
-		level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
+		srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
 		srv.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return

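The new `filter` option above is handed to Consul's catalog API unmodified via `consul.QueryOptions.Filter`. A minimal sketch of that call path using the `hashicorp/consul/api` client directly (the filter expression is the one used in the tests; the agent address comes from the client's defaults):

```go
package main

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Equivalent of `filter: NodeMeta.rack_name == "2304"` in consul_sd_configs:
	// the expression is evaluated server-side by Consul, which the new config
	// comment recommends over node_meta as of Consul 1.14.
	opts := &consul.QueryOptions{
		Filter: `NodeMeta.rack_name == "2304"`,
	}
	services, _, err := client.Catalog().Services(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(services)
}
```
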
@@ -21,10 +21,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 	"gopkg.in/yaml.v2"
@@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
 		case "/v1/catalog/services?index=1&wait=120000ms":
 			time.Sleep(5 * time.Second)
 			response = ServicesTestAnswer
+		case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms":
+			response = ServicesTestAnswer
 		default:
 			t.Errorf("Unhandled consul call: %s", r.URL)
 		}
@@ -270,7 +272,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
 }
 
 func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
-	logger := log.NewNopLogger()
+	logger := promslog.NewNopLogger()
 
 	metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
 
@@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) {
 	<-ch
 }
 
+// Watch the test service with a specific tag and node-meta via Filter parameter.
+func TestFilterOption(t *testing.T) {
+	stub, config := newServer(t)
+	defer stub.Close()
+
+	config.Services = []string{"test"}
+	config.Filter = `NodeMeta.rack_name == "2304"`
+	config.Token = "fake-token"
+
+	d := newDiscovery(t, config)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	ch := make(chan []*targetgroup.Group)
+	go func() {
+		d.Run(ctx, ch)
+		close(ch)
+	}()
+	checkOneTarget(t, <-ch)
+	cancel()
+}
+
 func TestGetDatacenterShouldReturnError(t *testing.T) {
 	for _, tc := range []struct {
 		handler func(http.ResponseWriter, *http.Request)
@@ -407,7 +430,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
 		err = d.getDatacenter()
 
 		// An error should be returned.
-		require.Equal(t, tc.errMessage, err.Error())
+		require.EqualError(t, err, tc.errMessage)
 		// Should still be empty.
 		require.Equal(t, "", d.clientDatacenter)
 	}

@@ -16,6 +16,7 @@ package digitalocean
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
@@ -23,7 +24,6 @@ import (
 	"time"
 
 	"github.com/digitalocean/godo"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -111,7 +111,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*digitaloceanMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -19,9 +19,9 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
 	require.NoError(t, err)

@@ -15,9 +15,9 @@ package discovery
 
 import (
 	"context"
+	"log/slog"
 	"reflect"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 
@@ -47,7 +47,7 @@ type DiscovererMetrics interface {
 
 // DiscovererOptions provides options for a Discoverer.
 type DiscovererOptions struct {
-	Logger log.Logger
+	Logger *slog.Logger
 
 	Metrics DiscovererMetrics
 
@@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) {
 
 // UnmarshalYAML implements yaml.Unmarshaler.
 func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	cfgTyp := getConfigType(configsType)
+	cfgTyp := reflect.StructOf(configFields)
 	cfgPtr := reflect.New(cfgTyp)
 	cfgVal := cfgPtr.Elem()
 
@@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 // MarshalYAML implements yaml.Marshaler.
 func (c Configs) MarshalYAML() (interface{}, error) {
-	cfgTyp := getConfigType(configsType)
+	cfgTyp := reflect.StructOf(configFields)
 	cfgPtr := reflect.New(cfgTyp)
 	cfgVal := cfgPtr.Elem()
 
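
`getConfigType(configsType)` is replaced by a direct `reflect.StructOf(configFields)` call; `configFields` itself is defined elsewhere in the package and not shown in this diff. As a rough, self-contained illustration of what `reflect.StructOf` does (the field name and tag below are invented for the example):

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.StructOf assembles a brand-new struct type at runtime from
	// field descriptors, much as the discovery package builds one field
	// per registered SD mechanism before unmarshaling YAML into it.
	fields := []reflect.StructField{{
		Name: "StaticConfigs", // invented field for the example
		Type: reflect.TypeOf([]string{}),
		Tag:  `yaml:"static_configs,omitempty"`,
	}}
	cfgTyp := reflect.StructOf(fields)
	cfgPtr := reflect.New(cfgTyp)                // addressable value, ready for unmarshaling
	fmt.Println(cfgPtr.Elem().Type().NumField()) // 1
}
```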
@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
+// Copyright 2024 The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -11,25 +11,26 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package testutil
+package discovery
 
 import (
 	"testing"
 
-	"github.com/go-kit/log"
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v2"
 )
 
-type logger struct {
-	t *testing.T
-}
-
-// NewLogger returns a gokit compatible Logger which calls t.Log.
-func NewLogger(t *testing.T) log.Logger {
-	return logger{t: t}
-}
-
-// Log implements log.Logger.
-func (t logger) Log(keyvals ...interface{}) error {
-	t.t.Log(keyvals...)
-	return nil
-}
+func TestConfigsCustomUnMarshalMarshal(t *testing.T) {
+	input := `static_configs:
+- targets:
+  - foo:1234
+  - bar:4321
+`
+	cfg := &Configs{}
+	err := yaml.UnmarshalStrict([]byte(input), cfg)
+	require.NoError(t, err)
+
+	output, err := yaml.Marshal(cfg)
+	require.NoError(t, err)
+	require.Equal(t, input, string(output))
+}
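
The deleted helper adapted go-kit logging to `t.Log`. If an equivalent is still wanted after the migration, a sketch of a `*slog.Logger` that writes through the testing framework might look like this (not part of the diff; `NewTestLogger` and `testWriter` are hypothetical names):

```go
package testutil

import (
	"log/slog"
	"testing"
)

// testWriter funnels slog output into t.Log so log lines are attached
// to the test that produced them.
type testWriter struct{ t *testing.T }

func (w testWriter) Write(p []byte) (int, error) {
	w.t.Log(string(p))
	return len(p), nil
}

// NewTestLogger returns a *slog.Logger that logs via t.Log.
func NewTestLogger(t *testing.T) *slog.Logger {
	return slog.New(slog.NewTextHandler(testWriter{t: t}, nil))
}
```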
@@ -17,17 +17,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/miekg/dns"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -111,21 +111,21 @@ type Discovery struct {
 	names   []string
 	port    int
 	qtype   uint16
-	logger  log.Logger
+	logger  *slog.Logger
 	metrics *dnsMetrics
 
-	lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
+	lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*dnsMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	qtype := dns.TypeSRV
@@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	for _, name := range d.names {
 		go func(n string) {
 			if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) {
-				level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err)
+				d.logger.Error("Error refreshing DNS targets", "err", err)
 			}
 			wg.Done()
 		}(name)
@@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 			// CNAME responses can occur with "Type: A" dns_sd_config requests.
 			continue
 		default:
-			level.Warn(d.logger).Log("msg", "Invalid record", "record", record)
+			d.logger.Warn("Invalid record", "record", record)
 			continue
 		}
 		tg.Targets = append(tg.Targets, model.LabelSet{
@@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 // error will be generic-looking, because trying to return all the errors
 // returned by the combination of all name permutations and servers is a
 // nightmare.
-func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 	conf, err := dns.ClientConfigFromFile(resolvConf)
 	if err != nil {
 		return nil, fmt.Errorf("could not load resolv.conf: %w", err)
@@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
 // A non-viable answer is "anything else", which encompasses both various
 // system-level problems (like network timeouts) and also
 // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc).
-func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) {
+func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) {
 	client := &dns.Client{}
 
 	for _, server := range conf.Servers {
 		servAddr := net.JoinHostPort(server, conf.Port)
 		msg, err := askServerForName(name, qtype, client, servAddr, true)
 		if err != nil {
-			level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err)
+			logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err)
 			continue
 		}
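
The call-site rewrite repeated through these hunks is mechanical: a go-kit level wrapper with an explicit "msg" key becomes a leveled method on `*slog.Logger`. A side-by-side sketch:

```go
package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
)

func main() {
	err := errors.New("lookup timed out")

	// Before: the level lives in a wrapper package and the message is a "msg" field.
	gokit := log.NewLogfmtLogger(os.Stderr)
	level.Error(gokit).Log("msg", "Error refreshing DNS targets", "err", err)

	// After: the level is a method and the message a positional argument;
	// key/value pairs follow as before.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	logger.Error("Error refreshing DNS targets", "err", err)
}
```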
@@ -16,11 +16,11 @@ package dns
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/miekg/dns"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
@@ -40,7 +40,7 @@ func TestDNS(t *testing.T) {
 	testCases := []struct {
 		name   string
 		config SDConfig
-		lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
+		lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
 
 		expected []*targetgroup.Group
 	}{
@@ -52,7 +52,7 @@ func TestDNS(t *testing.T) {
 				Port:            80,
 				Type:            "A",
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return nil, fmt.Errorf("some error")
 			},
 			expected: []*targetgroup.Group{},
@@ -65,7 +65,7 @@ func TestDNS(t *testing.T) {
 				Port:            80,
 				Type:            "A",
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.A{A: net.IPv4(192, 0, 2, 2)},
@@ -97,7 +97,7 @@ func TestDNS(t *testing.T) {
 				Port:            80,
 				Type:            "AAAA",
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.AAAA{AAAA: net.IPv6loopback},
@@ -128,7 +128,7 @@ func TestDNS(t *testing.T) {
 				Type:            "SRV",
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.SRV{Port: 3306, Target: "db1.example.com."},
@@ -167,7 +167,7 @@ func TestDNS(t *testing.T) {
 				Names:           []string{"_mysql._tcp.db.example.com."},
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.SRV{Port: 3306, Target: "db1.example.com."},
@@ -198,7 +198,7 @@ func TestDNS(t *testing.T) {
 				Names:           []string{"_mysql._tcp.db.example.com."},
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{}, nil
 			},
 			expected: []*targetgroup.Group{
@@ -215,7 +215,7 @@ func TestDNS(t *testing.T) {
 				Port:            25,
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.MX{Preference: 0, Mx: "smtp1.example.com."},
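
The table-driven tests inject fake resolvers through `lookupFn`, so only the logger parameter in the function type changes. A sketch of one such stub under the new signature (`fakeLookup` is a hypothetical name):

```go
package sketch

import (
	"log/slog"
	"net"

	"github.com/miekg/dns"
)

// fakeLookup matches the new lookupFn type; the logger is accepted
// (and ignored) so tests can drop in canned DNS answers.
func fakeLookup(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
	return &dns.Msg{
		Answer: []dns.RR{
			&dns.A{A: net.IPv4(192, 0, 2, 2)},
		},
	}, nil
}
```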
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
 	"strconv"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -126,7 +126,7 @@ type Discovery struct {
 }
 
 // NewDiscovery creates a new Eureka discovery for the given role.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*eurekaMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,6 +19,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"strings"
@@ -26,12 +27,11 @@ import (
 	"time"
 
 	"github.com/fsnotify/fsnotify"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -175,20 +175,20 @@ type Discovery struct {
 	// and how many target groups they contained.
 	// This is used to detect deleted target groups.
 	lastRefresh map[string]int
-	logger      log.Logger
+	logger      *slog.Logger
 
 	metrics *fileMetrics
 }
 
 // NewDiscovery returns a new file discovery for the given paths.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	fm, ok := metrics.(*fileMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	disc := &Discovery{
@@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string {
 	for _, p := range d.paths {
 		files, err := filepath.Glob(p)
 		if err != nil {
-			level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err)
+			d.logger.Error("Error expanding glob", "glob", p, "err", err)
 			continue
 		}
 		paths = append(paths, files...)
@@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() {
 			p = "./"
 		}
 		if err := d.watcher.Add(p); err != nil {
-			level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err)
+			d.logger.Error("Error adding file watch", "path", p, "err", err)
 		}
 	}
 }
@@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() {
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
-		level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
+		d.logger.Error("Error adding file watcher", "err", err)
 		d.metrics.fileWatcherErrorsCount.Inc()
 		return
 	}
@@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 		case err := <-d.watcher.Errors:
 			if err != nil {
-				level.Error(d.logger).Log("msg", "Error watching file", "err", err)
+				d.logger.Error("Error watching file", "err", err)
 			}
 		}
 	}
@@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) {
 
 // stop shuts down the file watcher.
 func (d *Discovery) stop() {
-	level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
+	d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
 
 	done := make(chan struct{})
 	defer close(done)
@@ -320,10 +320,10 @@ func (d *Discovery) stop() {
 		}
 	}()
 	if err := d.watcher.Close(); err != nil {
-		level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
+		d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
 	}
 
-	level.Debug(d.logger).Log("msg", "File discovery stopped")
+	d.logger.Debug("File discovery stopped")
 }
 
 // refresh reads all files matching the discovery's patterns and sends the respective
@@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
 		if err != nil {
 			d.metrics.fileSDReadErrorsCount.Inc()
 
-			level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
+			d.logger.Error("Error reading file", "path", p, "err", err)
 			// Prevent deletion down below.
 			ref[p] = d.lastRefresh[p]
 			continue
@@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
 	for f, n := range d.lastRefresh {
 		m, ok := ref[f]
 		if !ok || n > m {
-			level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f)
+			d.logger.Debug("file_sd refresh found file that should be removed", "file", f)
 			d.deleteTimestamp(f)
 			for i := m; i < n; i++ {
 				select {
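
Each `NewDiscovery` in this diff keeps the same defensive default; only the nop implementation changes. The guard, sketched in isolation (`defaultLogger` is a hypothetical name):

```go
package sketch

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// defaultLogger mirrors the guard used by the constructors above:
// callers may pass nil and still get a usable (silent) logger.
func defaultLogger(logger *slog.Logger) *slog.Logger {
	if logger == nil {
		return promslog.NewNopLogger()
	}
	return logger
}
```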
@@ -17,12 +17,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"golang.org/x/oauth2/google"
@@ -129,7 +129,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*gceMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -15,12 +15,12 @@ package hetzner
 
 import (
 	"context"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/hetznercloud/hcloud-go/v2/hcloud"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -58,7 +58,7 @@ type hcloudDiscovery struct {
 }
 
 // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
-func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
+func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) {
 	d := &hcloudDiscovery{
 		port: conf.Port,
 	}
@@ -18,8 +18,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 )
 
@@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) {
 	cfg.HTTPClientConfig.BearerToken = hcloudTestToken
 	cfg.hcloudEndpoint = suite.Mock.Endpoint()
 
-	d, err := newHcloudDiscovery(&cfg, log.NewNopLogger())
+	d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	targetGroups, err := d.refresh(context.Background())
@@ -17,9 +17,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/hetznercloud/hcloud-go/v2/hcloud"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -135,7 +135,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*hetznerMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -157,7 +157,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
 	), nil
 }
 
-func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
+func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
 	switch conf.Role {
 	case HetznerRoleHcloud:
 		if conf.hcloudEndpoint == "" {
@@ -18,13 +18,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -51,7 +51,7 @@ type robotDiscovery struct {
 }
 
 // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) {
 	d := &robotDiscovery{
 		port:     conf.Port,
 		endpoint: conf.robotEndpoint,
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 )
 
@@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) {
 	cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword}
 	cfg.robotEndpoint = suite.Mock.Endpoint()
 
-	d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
+	d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	targetGroups, err := d.refresh(context.Background())
@@ -91,12 +91,11 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
 	cfg := DefaultSDConfig
 	cfg.robotEndpoint = suite.Mock.Endpoint()
 
-	d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
+	d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	targetGroups, err := d.refresh(context.Background())
-	require.Error(t, err)
-	require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
+	require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot")
 
 	require.Empty(t, targetGroups)
 }
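
One assertion is also tightened here: `require.Error` plus a separate string comparison collapses into a single `require.EqualError`. A sketch of the equivalence:

```go
package sketch

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEqualErrorEquivalence(t *testing.T) {
	err := errors.New("non 2xx status '401' response during hetzner service discovery with role robot")

	// Before: two assertions, one for non-nil-ness and one for the message.
	require.Error(t, err)
	require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())

	// After: one assertion that checks both at once.
	require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot")
}
```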
@@ -19,17 +19,18 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -114,14 +115,14 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new HTTP discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*httpMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...)
@@ -21,11 +21,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 	for _, test := range cases {
 		ctx := context.Background()
@@ -16,9 +16,9 @@ package ionos
 import (
 	"errors"
 	"fmt"
+	"log/slog"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -43,7 +43,7 @@ func init() {
 type Discovery struct{}
 
 // NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*ionosMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -16,13 +16,13 @@ package ionos
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	ionoscloud "github.com/ionos-cloud/sdk-go/v6"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -60,7 +60,7 @@ type serverDiscovery struct {
 	datacenterID string
 }
 
-func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) {
 	d := &serverDiscovery{
 		port:         conf.Port,
 		datacenterID: conf.DatacenterID,
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -33,7 +33,7 @@ import (
 
 // Endpoints discovers new endpoint targets.
 type Endpoints struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	endpointsInf cache.SharedIndexInformer
 	serviceInf   cache.SharedInformer
@@ -49,9 +49,9 @@ type Endpoints struct {
 }
 
 // NewEndpoints returns a new endpoints discovery.
-func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
+func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd)
@@ -92,13 +92,13 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err)
+		l.Error("Error adding endpoints event handler.", "err", err)
 	}
 
 	serviceUpdate := func(o interface{}) {
 		svc, err := convertToService(o)
 		if err != nil {
-			level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
+			e.logger.Error("converting to Service object failed", "err", err)
 			return
 		}
 
@@ -111,7 +111,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		}
 
 		if err != nil {
-			level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
+			e.logger.Error("retrieving endpoints failed", "err", err)
 		}
 	}
 	_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -131,7 +131,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+		l.Error("Error adding services event handler.", "err", err)
 	}
 	_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		UpdateFunc: func(old, cur interface{}) {
@@ -154,7 +154,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+		l.Error("Error adding pods event handler.", "err", err)
 	}
 	if e.withNodeMetadata {
 		_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -172,7 +172,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 			},
 		})
 		if err != nil {
-			level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+			l.Error("Error adding nodes event handler.", "err", err)
 		}
 	}
 
@@ -182,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 func (e *Endpoints) enqueueNode(nodeName string) {
 	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+		e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
 		return
 	}
 
@@ -194,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) {
 func (e *Endpoints) enqueuePod(podNamespacedName string) {
 	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
+		e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
 		return
 	}
 
@@ -223,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache")
+			e.logger.Error("endpoints informer unable to sync cache")
 		}
 		return
 	}
@@ -247,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
+		e.logger.Error("splitting key failed", "key", key)
 		return true
 	}
 
 	o, exists, err := e.endpointsStore.GetByKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
+		e.logger.Error("getting object from store failed", "key", key)
 		return true
 	}
 	if !exists {
@@ -262,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
 	}
 	eps, err := convertToEndpoints(o)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err)
+		e.logger.Error("converting to Endpoints object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, e.buildEndpoints(eps))
@@ -361,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 			target = target.Merge(podLabels(pod))
 
 			// Attach potential container port labels matching the endpoint port.
-			for _, c := range pod.Spec.Containers {
+			containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
+			for i, c := range containers {
 				for _, cport := range c.Ports {
 					if port.Port == cport.ContainerPort {
 						ports := strconv.FormatUint(uint64(port.Port), 10)
+						isInit := i >= len(pod.Spec.Containers)
 
 						target[podContainerNameLabel] = lv(c.Name)
 						target[podContainerImageLabel] = lv(c.Image)
 						target[podContainerPortNameLabel] = lv(cport.Name)
 						target[podContainerPortNumberLabel] = lv(ports)
 						target[podContainerPortProtocolLabel] = lv(string(port.Protocol))
+						target[podContainerIsInit] = lv(strconv.FormatBool(isInit))
 						break
 					}
 				}
@@ -397,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 
 	v := eps.Labels[apiv1.EndpointsOverCapacity]
 	if v == "truncated" {
-		level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+		e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
 	}
 	if v == "warning" {
-		level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+		e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
 	}
 
 	// For all seen pods, check all container ports. If they were not covered
@@ -411,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 			continue
 		}
 
-		for _, c := range pe.pod.Spec.Containers {
+		containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...)
+		for i, c := range containers {
 			for _, cport := range c.Ports {
 				hasSeenPort := func() bool {
 					for _, eport := range pe.servicePorts {
@@ -428,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
 				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
 
+				isInit := i >= len(pe.pod.Spec.Containers)
 				target := model.LabelSet{
 					model.AddressLabel:            lv(a),
 					podContainerNameLabel:         lv(c.Name),
@@ -435,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 					podContainerPortNameLabel:     lv(cport.Name),
 					podContainerPortNumberLabel:   lv(ports),
 					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+					podContainerIsInit:            lv(strconv.FormatBool(isInit)),
 				}
 				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 			}
@@ -454,7 +460,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
 
 	obj, exists, err := e.podStore.Get(p)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
+		e.logger.Error("resolving pod ref failed", "err", err)
 		return nil
 	}
 	if !exists {
@@ -470,7 +476,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
 
 	obj, exists, err := e.serviceStore.Get(svc)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
+		e.logger.Error("retrieving service failed", "err", err)
 		return
 	}
 	if !exists {
@@ -481,14 +487,14 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
 	tg.Labels = tg.Labels.Merge(serviceLabels(svc))
 }
 
-func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet {
+func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet {
 	if nodeName == nil {
 		return tg
 	}
 
 	obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName)
 	if err != nil {
-		level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err)
+		logger.Error("Error getting node", "node", *nodeName, "err", err)
 		return tg
 	}
 
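
Beyond the logger swap, the endpoints discovery now scans init containers: `InitContainers` is appended to `pod.Spec.Containers`, and an `isInit` flag is derived from the loop index. A stripped-down sketch of that index trick (the `scanContainers` wrapper is hypothetical; the field names follow k8s.io/api/core/v1):

```go
package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// scanContainers walks regular and init containers in one pass, the way
// the buildEndpoints hunks above do. Any index at or past
// len(pod.Spec.Containers) must belong to an init container.
func scanContainers(pod *v1.Pod) {
	containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
	for i, c := range containers {
		isInit := i >= len(pod.Spec.Containers)
		fmt.Printf("container=%s init=%v\n", c.Name, isInit)
	}
}
```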
@ -244,6 +244,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
|
||||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
"__meta_kubernetes_pod_uid": "deadbeef",
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "false",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"__address__": "1.2.3.4:9001",
|
"__address__": "1.2.3.4:9001",
|
||||||
|
@ -259,6 +260,7 @@ func TestEndpointsDiscoveryAdd(t *testing.T) {
|
||||||
"__meta_kubernetes_pod_container_port_number": "9001",
|
"__meta_kubernetes_pod_container_port_number": "9001",
|
||||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
"__meta_kubernetes_pod_uid": "deadbeef",
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "false",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -821,6 +823,7 @@ func TestEndpointsDiscoveryNamespaces(t *testing.T) {
|
||||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
"__meta_kubernetes_pod_uid": "deadbeef",
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "false",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
|
@ -1078,6 +1081,7 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
|
||||||
"__meta_kubernetes_pod_container_port_number": "9000",
|
"__meta_kubernetes_pod_container_port_number": "9000",
|
||||||
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
"__meta_kubernetes_pod_container_port_protocol": "TCP",
|
||||||
"__meta_kubernetes_pod_uid": "deadbeef",
|
"__meta_kubernetes_pod_uid": "deadbeef",
|
||||||
|
"__meta_kubernetes_pod_container_init": "false",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Labels: model.LabelSet{
|
Labels: model.LabelSet{
|
||||||
@@ -1089,3 +1093,167 @@ func TestEndpointsDiscoveryUpdatePod(t *testing.T) {
 		},
 	}.Run(t)
 }
+
+func TestEndpointsDiscoverySidecarContainer(t *testing.T) {
+	objs := []runtime.Object{
+		&v1.Endpoints{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "testsidecar",
+				Namespace: "default",
+			},
+			Subsets: []v1.EndpointSubset{
+				{
+					Addresses: []v1.EndpointAddress{
+						{
+							IP: "4.3.2.1",
+							TargetRef: &v1.ObjectReference{
+								Kind: "Pod",
+								Name: "testpod",
+								Namespace: "default",
+							},
+						},
+					},
+					Ports: []v1.EndpointPort{
+						{
+							Name: "testport",
+							Port: 9000,
+							Protocol: v1.ProtocolTCP,
+						},
+						{
+							Name: "initport",
+							Port: 9111,
+							Protocol: v1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		&v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "testpod",
+				Namespace: "default",
+				UID: types.UID("deadbeef"),
+			},
+			Spec: v1.PodSpec{
+				NodeName: "testnode",
+				InitContainers: []v1.Container{
+					{
+						Name: "ic1",
+						Image: "ic1:latest",
+						Ports: []v1.ContainerPort{
+							{
+								Name: "initport",
+								ContainerPort: 1111,
+								Protocol: v1.ProtocolTCP,
+							},
+						},
+					},
+					{
+						Name: "ic2",
+						Image: "ic2:latest",
+						Ports: []v1.ContainerPort{
+							{
+								Name: "initport",
+								ContainerPort: 9111,
+								Protocol: v1.ProtocolTCP,
+							},
+						},
+					},
+				},
+				Containers: []v1.Container{
+					{
+						Name: "c1",
+						Image: "c1:latest",
+						Ports: []v1.ContainerPort{
+							{
+								Name: "mainport",
+								ContainerPort: 9000,
+								Protocol: v1.ProtocolTCP,
+							},
+						},
+					},
+				},
+			},
+			Status: v1.PodStatus{
+				HostIP: "2.3.4.5",
+				PodIP: "4.3.2.1",
+			},
+		},
+	}
+
+	n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{}, objs...)
+
+	k8sDiscoveryTest{
+		discovery: n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpoints/default/testsidecar": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "4.3.2.1:9000",
+						"__meta_kubernetes_endpoint_address_target_kind": "Pod",
+						"__meta_kubernetes_endpoint_address_target_name": "testpod",
+						"__meta_kubernetes_endpoint_port_name": "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready": "true",
+						"__meta_kubernetes_pod_container_image": "c1:latest",
+						"__meta_kubernetes_pod_container_name": "c1",
+						"__meta_kubernetes_pod_container_port_name": "mainport",
+						"__meta_kubernetes_pod_container_port_number": "9000",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
+					},
+					{
+						"__address__": "4.3.2.1:9111",
+						"__meta_kubernetes_endpoint_address_target_kind": "Pod",
+						"__meta_kubernetes_endpoint_address_target_name": "testpod",
+						"__meta_kubernetes_endpoint_port_name": "initport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready": "true",
+						"__meta_kubernetes_pod_container_image": "ic2:latest",
+						"__meta_kubernetes_pod_container_name": "ic2",
+						"__meta_kubernetes_pod_container_port_name": "initport",
+						"__meta_kubernetes_pod_container_port_number": "9111",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "true",
+					},
+					{
+						"__address__": "4.3.2.1:1111",
+						"__meta_kubernetes_pod_container_image": "ic1:latest",
+						"__meta_kubernetes_pod_container_name": "ic1",
+						"__meta_kubernetes_pod_container_port_name": "initport",
+						"__meta_kubernetes_pod_container_port_number": "1111",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "true",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_endpoints_name": "testsidecar",
+					"__meta_kubernetes_namespace": "default",
+				},
+				Source: "endpoints/default/testsidecar",
+			},
+		},
+	}.Run(t)
+}
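
The test above pins down the new behaviour for the endpoints role: ports declared on init containers (such as native sidecars) are now discovered next to regular container ports, and every target carries __meta_kubernetes_pod_container_init so init-container targets can be kept or dropped during relabelling. As a minimal standalone sketch of the detection idea — the visitPorts helper and its names are illustrative, not Prometheus API — the pattern from the diff boils down to:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // visitPorts walks regular and init containers in one pass; any index at or
    // past len(spec.Containers) belongs to an init container, mirroring the
    // isInit := i >= len(pod.Spec.Containers) check in the diff.
    func visitPorts(spec v1.PodSpec, visit func(c v1.Container, p v1.ContainerPort, isInit bool)) {
        containers := append(spec.Containers, spec.InitContainers...)
        for i, c := range containers {
            for _, p := range c.Ports {
                visit(c, p, i >= len(spec.Containers))
            }
        }
    }

    func main() {
        spec := v1.PodSpec{
            Containers:     []v1.Container{{Name: "c1", Ports: []v1.ContainerPort{{Name: "mainport", ContainerPort: 9000}}}},
            InitContainers: []v1.Container{{Name: "ic1", Ports: []v1.ContainerPort{{Name: "initport", ContainerPort: 1111}}}},
        }
        visitPorts(spec, func(c v1.Container, p v1.ContainerPort, isInit bool) {
            fmt.Printf("%s/%d init=%v\n", c.Name, p.ContainerPort, isInit) // c1/9000 init=false, ic1/1111 init=true
        })
    }

Note that append on a slice taken from the pod spec can reuse its backing array; the discovery code only reads from the combined slice, so that is safe here.
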
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/discovery/v1"
 	"k8s.io/client-go/tools/cache"
@@ -35,7 +35,7 @@ import (
 
 // EndpointSlice discovers new endpoint targets.
 type EndpointSlice struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	endpointSliceInf cache.SharedIndexInformer
 	serviceInf cache.SharedInformer
@@ -51,9 +51,9 @@ type EndpointSlice struct {
 }
 
 // NewEndpointSlice returns a new endpointslice discovery.
-func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
+func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd)
@@ -92,13 +92,13 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err)
+		l.Error("Error adding endpoint slices event handler.", "err", err)
 	}
 
 	serviceUpdate := func(o interface{}) {
 		svc, err := convertToService(o)
 		if err != nil {
-			level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
+			e.logger.Error("converting to Service object failed", "err", err)
 			return
 		}
 
@@ -108,7 +108,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		for _, obj := range e.endpointSliceStore.List() {
 			esa, err := e.getEndpointSliceAdaptor(obj)
 			if err != nil {
-				level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
+				e.logger.Error("converting to EndpointSlice object failed", "err", err)
 				continue
 			}
 			if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name {
@@ -131,7 +131,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+		l.Error("Error adding services event handler.", "err", err)
 	}
 
 	if e.withNodeMetadata {
@@ -150,7 +150,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 			},
 		})
 		if err != nil {
-			level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+			l.Error("Error adding nodes event handler.", "err", err)
 		}
 	}
 
@@ -160,7 +160,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 func (e *EndpointSlice) enqueueNode(nodeName string) {
 	endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+		e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
 		return
 	}
 
@@ -188,7 +188,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
 	}
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
+			e.logger.Error("endpointslice informer unable to sync cache")
 		}
 		return
 	}
@@ -212,13 +212,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
+		e.logger.Error("splitting key failed", "key", key)
 		return true
 	}
 
 	o, exists, err := e.endpointSliceStore.GetByKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
+		e.logger.Error("getting object from store failed", "key", key)
 		return true
 	}
 	if !exists {
@@ -228,7 +228,7 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr
 
 	esa, err := e.getEndpointSliceAdaptor(o)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
+		e.logger.Error("converting to EndpointSlice object failed", "err", err)
 		return true
 	}
 
@@ -377,19 +377,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 			target = target.Merge(podLabels(pod))
 
 			// Attach potential container port labels matching the endpoint port.
-			for _, c := range pod.Spec.Containers {
+			containers := append(pod.Spec.Containers, pod.Spec.InitContainers...)
+			for i, c := range containers {
 				for _, cport := range c.Ports {
 					if port.port() == nil {
 						continue
 					}
 
 					if *port.port() == cport.ContainerPort {
 						ports := strconv.FormatUint(uint64(*port.port()), 10)
+						isInit := i >= len(pod.Spec.Containers)
+
 						target[podContainerNameLabel] = lv(c.Name)
 						target[podContainerImageLabel] = lv(c.Image)
 						target[podContainerPortNameLabel] = lv(cport.Name)
 						target[podContainerPortNumberLabel] = lv(ports)
 						target[podContainerPortProtocolLabel] = lv(string(cport.Protocol))
+						target[podContainerIsInit] = lv(strconv.FormatBool(isInit))
 						break
 					}
 				}
@@ -417,7 +421,8 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 			continue
 		}
 
-		for _, c := range pe.pod.Spec.Containers {
+		containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...)
+		for i, c := range containers {
 			for _, cport := range c.Ports {
 				hasSeenPort := func() bool {
 					for _, eport := range pe.servicePorts {
@@ -437,6 +442,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 				a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10))
 				ports := strconv.FormatUint(uint64(cport.ContainerPort), 10)
 
+				isInit := i >= len(pe.pod.Spec.Containers)
 				target := model.LabelSet{
 					model.AddressLabel: lv(a),
 					podContainerNameLabel: lv(c.Name),
@@ -444,6 +450,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou
 					podContainerPortNameLabel: lv(cport.Name),
 					podContainerPortNumberLabel: lv(ports),
 					podContainerPortProtocolLabel: lv(string(cport.Protocol)),
+					podContainerIsInit: lv(strconv.FormatBool(isInit)),
 				}
 				tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod)))
 			}
@@ -463,7 +470,7 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
 
 	obj, exists, err := e.podStore.Get(p)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
+		e.logger.Error("resolving pod ref failed", "err", err)
 		return nil
 	}
 	if !exists {
@@ -488,7 +495,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
 
 	obj, exists, err := e.serviceStore.Get(svc)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
+		e.logger.Error("retrieving service failed", "err", err)
 		return
 	}
 	if !exists {
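
The logging changes in this file are mechanical and repeat across the rest of the commit: the log.Logger field becomes *slog.Logger, level.X(l).Log("msg", m, k, v) becomes l.X(m, k, v), and the nop fallback moves from go-kit to github.com/prometheus/common/promslog. A standalone sketch of the mapping (not Prometheus code):

    package main

    import (
        "log/slog"

        "github.com/prometheus/common/promslog"
    )

    func newComponentLogger(l *slog.Logger) *slog.Logger {
        if l == nil {
            l = promslog.NewNopLogger() // replaces go-kit's log.NewNopLogger()
        }
        return l
    }

    func main() {
        l := newComponentLogger(nil)
        // Before: level.Error(l).Log("msg", "converting to Service object failed", "err", err)
        // After: the level is a method and the message is the first argument.
        l.Error("converting to Service object failed", "err", "example error")
    }

The nil guard matters more after the migration: go-kit's log.Logger was an interface, whereas *slog.Logger is a concrete pointer that would panic if used while nil.
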
@@ -291,6 +291,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 						"__meta_kubernetes_pod_phase": "",
 						"__meta_kubernetes_pod_ready": "unknown",
 						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
 					},
 					{
 						"__address__": "1.2.3.4:9001",
@@ -306,6 +307,7 @@ func TestEndpointSliceDiscoveryAdd(t *testing.T) {
 						"__meta_kubernetes_pod_phase": "",
 						"__meta_kubernetes_pod_ready": "unknown",
 						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
 					},
 				},
 				Labels: model.LabelSet{
@@ -986,6 +988,7 @@ func TestEndpointSliceDiscoveryNamespaces(t *testing.T) {
 						"__meta_kubernetes_pod_phase": "",
 						"__meta_kubernetes_pod_ready": "unknown",
 						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
 					},
 				},
 				Labels: model.LabelSet{
@@ -1199,3 +1202,165 @@ func TestEndpointSliceInfIndexersCount(t *testing.T) {
 		})
 	}
 }
+
+func TestEndpointSliceDiscoverySidecarContainer(t *testing.T) {
+	objs := []runtime.Object{
+		&v1.EndpointSlice{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "testsidecar",
+				Namespace: "default",
+			},
+			AddressType: v1.AddressTypeIPv4,
+			Ports: []v1.EndpointPort{
+				{
+					Name: strptr("testport"),
+					Port: int32ptr(9000),
+					Protocol: protocolptr(corev1.ProtocolTCP),
+				},
+				{
+					Name: strptr("initport"),
+					Port: int32ptr(9111),
+					Protocol: protocolptr(corev1.ProtocolTCP),
+				},
+			},
+			Endpoints: []v1.Endpoint{
+				{
+					Addresses: []string{"4.3.2.1"},
+					TargetRef: &corev1.ObjectReference{
+						Kind: "Pod",
+						Name: "testpod",
+						Namespace: "default",
+					},
+				},
+			},
+		},
+		&corev1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "testpod",
+				Namespace: "default",
+				UID: types.UID("deadbeef"),
+			},
+			Spec: corev1.PodSpec{
+				NodeName: "testnode",
+				InitContainers: []corev1.Container{
+					{
+						Name: "ic1",
+						Image: "ic1:latest",
+						Ports: []corev1.ContainerPort{
+							{
+								Name: "initport",
+								ContainerPort: 1111,
+								Protocol: corev1.ProtocolTCP,
+							},
+						},
+					},
+					{
+						Name: "ic2",
+						Image: "ic2:latest",
+						Ports: []corev1.ContainerPort{
+							{
+								Name: "initport",
+								ContainerPort: 9111,
+								Protocol: corev1.ProtocolTCP,
+							},
+						},
+					},
+				},
+				Containers: []corev1.Container{
+					{
+						Name: "c1",
+						Image: "c1:latest",
+						Ports: []corev1.ContainerPort{
+							{
+								Name: "mainport",
+								ContainerPort: 9000,
+								Protocol: corev1.ProtocolTCP,
+							},
+						},
+					},
+				},
+			},
+			Status: corev1.PodStatus{
+				HostIP: "2.3.4.5",
+				PodIP: "4.3.2.1",
+			},
+		},
+	}
+
+	n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{}, objs...)
+
+	k8sDiscoveryTest{
+		discovery: n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpointslice/default/testsidecar": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "4.3.2.1:9000",
+						"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
+						"__meta_kubernetes_endpointslice_address_target_name": "testpod",
+						"__meta_kubernetes_endpointslice_port": "9000",
+						"__meta_kubernetes_endpointslice_port_name": "testport",
+						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+						"__meta_kubernetes_pod_container_image": "c1:latest",
+						"__meta_kubernetes_pod_container_name": "c1",
+						"__meta_kubernetes_pod_container_port_name": "mainport",
+						"__meta_kubernetes_pod_container_port_number": "9000",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "false",
+					},
+					{
+						"__address__": "4.3.2.1:9111",
+						"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
+						"__meta_kubernetes_endpointslice_address_target_name": "testpod",
+						"__meta_kubernetes_endpointslice_port": "9111",
+						"__meta_kubernetes_endpointslice_port_name": "initport",
+						"__meta_kubernetes_endpointslice_port_protocol": "TCP",
+						"__meta_kubernetes_pod_container_image": "ic2:latest",
+						"__meta_kubernetes_pod_container_name": "ic2",
+						"__meta_kubernetes_pod_container_port_name": "initport",
+						"__meta_kubernetes_pod_container_port_number": "9111",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "true",
+					},
+					{
+						"__address__": "4.3.2.1:1111",
+						"__meta_kubernetes_pod_container_image": "ic1:latest",
+						"__meta_kubernetes_pod_container_name": "ic1",
+						"__meta_kubernetes_pod_container_port_name": "initport",
+						"__meta_kubernetes_pod_container_port_number": "1111",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_host_ip": "2.3.4.5",
+						"__meta_kubernetes_pod_ip": "4.3.2.1",
+						"__meta_kubernetes_pod_name": "testpod",
+						"__meta_kubernetes_pod_node_name": "testnode",
+						"__meta_kubernetes_pod_phase": "",
+						"__meta_kubernetes_pod_ready": "unknown",
+						"__meta_kubernetes_pod_uid": "deadbeef",
+						"__meta_kubernetes_pod_container_init": "true",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_endpointslice_address_type": "IPv4",
+					"__meta_kubernetes_endpointslice_name": "testsidecar",
+					"__meta_kubernetes_namespace": "default",
+				},
+				Source: "endpointslice/default/testsidecar",
+			},
+		},
+	}.Run(t)
+}
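
One detail worth noting in this fixture: unlike core/v1, the discovery/v1 EndpointPort type uses pointer fields, which is why the test goes through strptr, int32ptr, and protocolptr. Those helpers are defined elsewhere in the test package; if you need equivalents, they are one-liners along these lines (shown here as an assumption about their shape):

    package main

    import (
        corev1 "k8s.io/api/core/v1"
        v1 "k8s.io/api/discovery/v1"
    )

    func strptr(s string) *string                        { return &s }
    func int32ptr(i int32) *int32                        { return &i }
    func protocolptr(p corev1.Protocol) *corev1.Protocol { return &p }

    func main() {
        _ = v1.EndpointPort{
            Name:     strptr("testport"),
            Port:     int32ptr(9000),
            Protocol: protocolptr(corev1.ProtocolTCP),
        }
    }
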
@@ -17,10 +17,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"strings"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	v1 "k8s.io/api/networking/v1"
@@ -32,14 +31,14 @@ import (
 
 // Ingress implements discovery of Kubernetes ingress.
 type Ingress struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
 	queue    *workqueue.Type
 }
 
 // NewIngress returns a new ingress discovery.
-func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress {
+func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress {
 	ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd)
 	ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate)
 	ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete)
@@ -66,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err)
+		l.Error("Error adding ingresses event handler.", "err", err)
 	}
 	return s
 }
@@ -86,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(i.logger).Log("msg", "ingress informer unable to sync cache")
+			i.logger.Error("ingress informer unable to sync cache")
 		}
 		return
 	}
@@ -127,7 +126,7 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
 	case *v1.Ingress:
 		ia = newIngressAdaptorFromV1(ingress)
 	default:
-		level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
+		i.logger.Error("converting to Ingress object failed", "err",
 			fmt.Errorf("received unexpected object: %v", o))
 		return true
 	}
@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"os"
 	"reflect"
 	"strings"
@@ -25,11 +26,10 @@ import (
 
 	"github.com/prometheus/prometheus/util/strutil"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 	apiv1 "k8s.io/api/core/v1"
 	disv1 "k8s.io/api/discovery/v1"
@@ -260,7 +260,7 @@ type Discovery struct {
 	sync.RWMutex
 	client kubernetes.Interface
 	role Role
-	logger log.Logger
+	logger *slog.Logger
 	namespaceDiscovery *NamespaceDiscovery
 	discoverers []discovery.Discoverer
 	selectors roleSelector
@@ -285,14 +285,14 @@ func (d *Discovery) getNamespaces() []string {
 }
 
 // New creates a new Kubernetes discovery for the given role.
-func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
+func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
 	m, ok := metrics.(*kubernetesMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 	var (
 		kcfg *rest.Config
@@ -324,7 +324,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di
 			ownNamespace = string(ownNamespaceContents)
 		}
 
-		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
+		l.Info("Using pod service account via in-cluster config")
 	default:
 		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
 		if err != nil {
@@ -446,7 +446,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			go nodeInf.Run(ctx.Done())
 		}
 		eps := NewEndpointSlice(
-			log.With(d.logger, "role", "endpointslice"),
+			d.logger.With("role", "endpointslice"),
 			informer,
 			d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 			d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
@@ -506,7 +506,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		}
 
 		eps := NewEndpoints(
-			log.With(d.logger, "role", "endpoint"),
+			d.logger.With("role", "endpoint"),
 			d.newEndpointsByNodeInformer(elw),
 			d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 			d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
@@ -540,7 +540,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			},
 		}
 		pod := NewPod(
-			log.With(d.logger, "role", "pod"),
+			d.logger.With("role", "pod"),
 			d.newPodsByNodeInformer(plw),
 			nodeInformer,
 			d.metrics.eventCount,
@@ -564,7 +564,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			},
 		}
 		svc := NewService(
-			log.With(d.logger, "role", "service"),
+			d.logger.With("role", "service"),
 			d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 			d.metrics.eventCount,
 		)
@@ -589,7 +589,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		}
 		informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
 		ingress := NewIngress(
-			log.With(d.logger, "role", "ingress"),
+			d.logger.With("role", "ingress"),
 			informer,
 			d.metrics.eventCount,
 		)
@@ -598,11 +598,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		}
 	case RoleNode:
 		nodeInformer := d.newNodeInformer(ctx)
-		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount)
+		node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount)
 		d.discoverers = append(d.discoverers, node)
 		go node.informer.Run(ctx.Done())
 	default:
-		level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role)
+		d.logger.Error("unknown Kubernetes discovery kind", "role", d.role)
 	}
 
 	var wg sync.WaitGroup
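
The role-specific loggers in Run change shape but not behaviour: go-kit's package-level log.With(logger, k, v) becomes the method logger.With(k, v), which returns a child *slog.Logger that attaches the extra attributes to every record. A sketch using only the standard library:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        root := slog.New(slog.NewTextHandler(os.Stderr, nil))
        // Mirrors d.logger.With("role", "endpointslice") in the diff: every
        // record logged through eps is tagged role=endpointslice.
        eps := root.With("role", "endpointslice")
        eps.Info("watching endpoint slices")
    }
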
@@ -20,8 +20,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -71,7 +71,7 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer
 
 	d := &Discovery{
 		client: clientset,
-		logger: log.NewNopLogger(),
+		logger: promslog.NewNopLogger(),
 		role: role,
 		namespaceDiscovery: &nsDiscovery,
 		ownNamespace: "own-ns",
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -38,16 +38,16 @@ const (
 
 // Node discovers Kubernetes nodes.
 type Node struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
 	queue    *workqueue.Type
 }
 
 // NewNode returns a new node discovery.
-func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node {
+func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd)
@@ -76,7 +76,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+		l.Error("Error adding nodes event handler.", "err", err)
 	}
 	return n
 }
@@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(n.logger).Log("msg", "node informer unable to sync cache")
+			n.logger.Error("node informer unable to sync cache")
 		}
 		return
 	}
@@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
 	}
 	node, err := convertToNode(o)
 	if err != nil {
-		level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err)
+		n.logger.Error("converting to Node object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, n.buildNode(node))
@@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
 
 	addr, addrMap, err := nodeAddress(node)
 	if err != nil {
-		level.Warn(n.logger).Log("msg", "No node address found", "err", err)
+		n.logger.Warn("No node address found", "err", err)
 		return nil
 	}
 	addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10))
@@ -17,14 +17,14 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/cache"
@@ -44,14 +44,14 @@ type Pod struct {
 	nodeInf cache.SharedInformer
 	withNodeMetadata bool
 	store cache.Store
-	logger log.Logger
+	logger *slog.Logger
 	queue *workqueue.Type
 }
 
 // NewPod creates a new pod discovery.
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod {
+func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd)
@@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+		l.Error("Error adding pods event handler.", "err", err)
 	}
 
 	if p.withNodeMetadata {
@@ -100,7 +100,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 			},
 		})
 		if err != nil {
-			level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+			l.Error("Error adding pods event handler.", "err", err)
 		}
 	}
 
@@ -127,7 +127,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(p.logger).Log("msg", "pod informer unable to sync cache")
+			p.logger.Error("pod informer unable to sync cache")
 		}
 		return
 	}
@@ -164,7 +164,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
 	}
 	pod, err := convertToPod(o)
 	if err != nil {
-		level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
+		p.logger.Error("converting to Pod object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, p.buildPod(pod))
@@ -246,7 +246,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe
 func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string {
 	cStatus, err := p.findPodContainerStatus(statuses, containerName)
 	if err != nil {
-		level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err)
+		p.logger.Debug("cannot find container ID", "err", err)
 		return ""
 	}
 	return cStatus.ContainerID
@@ -315,7 +315,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 func (p *Pod) enqueuePodsForNode(nodeName string) {
 	pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
-		level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err)
+		p.logger.Error("Error getting pods for node", "node", nodeName, "err", err)
 		return
 	}
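
A behavioural nuance visible in the p.logger.Debug call above: with go-kit, debug records were typically dropped by a level-filter wrapper around the logger; with slog the handler decides, so whether Debug emits anything depends on the handler's configured level. A standalone illustration:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        lvl := new(slog.LevelVar) // zero value is Info
        l := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: lvl}))
        l.Debug("cannot find container ID", "err", "example") // suppressed at Info
        lvl.Set(slog.LevelDebug)
        l.Debug("cannot find container ID", "err", "example") // now emitted
    }
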
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -33,16 +33,16 @@ import (
 
 // Service implements discovery of Kubernetes services.
 type Service struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
 	queue    *workqueue.Type
 }
 
 // NewService returns a new service discovery.
-func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service {
+func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd)
@@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+		l.Error("Error adding services event handler.", "err", err)
 	}
 	return s
 }
@@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(s.logger).Log("msg", "service informer unable to sync cache")
+			s.logger.Error("service informer unable to sync cache")
 		}
 		return
 	}
@@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
 	}
 	eps, err := convertToService(o)
 	if err != nil {
-		level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err)
+		s.logger.Error("converting to Service object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, s.buildService(eps))
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/linode/linodego"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -138,7 +138,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*linodeMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,10 +19,10 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -238,7 +238,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 			defer metrics.Unregister()
 			defer refreshMetrics.Unregister()
 
-			d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+			d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 			require.NoError(t, err)
 			endpoint, err := url.Parse(sdmock.Endpoint())
 			require.NoError(t, err)
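
Tests settle on promslog.NewNopLogger(); outside tests a real logger would come from the same package. A minimal sketch — I'm assuming the zero-value promslog.Config is accepted and yields the package defaults (info level, logfmt-style output to stderr):

    package main

    import "github.com/prometheus/common/promslog"

    func main() {
        // Defaults; see the promslog docs for Level/Format options.
        logger := promslog.New(&promslog.Config{})
        logger.Info("refreshing Linode targets", "region", "us-east")
    }
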
@@ -16,14 +16,14 @@ package discovery
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"reflect"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	mgr := &Manager{
 		logger: logger,
@@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
 	if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
 		mgr.metrics = metrics
 	} else {
-		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+		logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
 		return nil
 	}
 
@@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
 // Manager maintains a set of discovery providers and sends each update to a map channel.
 // Targets are grouped by the target set name.
 type Manager struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	name     string
 	httpOpts []config.HTTPClientOption
 	mtx      sync.RWMutex
@@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
 }
 
 func (m *Manager) startProvider(ctx context.Context, p *Provider) {
-	level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
+	m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
 	ctx, cancel := context.WithCancel(ctx)
 	updates := make(chan []*targetgroup.Group)
 
@@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ
 		case tgs, ok := <-updates:
 			m.metrics.ReceivedUpdates.Inc()
 			if !ok {
-				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
+				m.logger.Debug("Discoverer channel closed", "provider", p.name)
 				// Wait for provider cancellation to ensure targets are cleaned up when expected.
 				<-ctx.Done()
 				return
@@ -364,7 +364,7 @@ func (m *Manager) sender() {
 			case m.syncCh <- m.allGroups():
 			default:
 				m.metrics.DelayedUpdates.Inc()
-				level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
+				m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle")
 				select {
 				case m.triggerSend <- struct{}{}:
 				default:
@@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
-			Logger: log.With(m.logger, "discovery", typ, "config", setName),
+			Logger: m.logger.With("discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
 			Metrics: m.sdMetrics[typ],
 		})
 		if err != nil {
-			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
+			m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName)
 			failed++
 			return
 		}
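
Taken together, wiring up a discovery manager after this change looks like the sketch below. CreateAndRegisterSDMetrics is the helper visible in the hunk header above; passing a nil logger would also work, since NewManager falls back to promslog.NewNopLogger().

    package main

    import (
        "context"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/common/promslog"

        "github.com/prometheus/prometheus/discovery"
    )

    func main() {
        reg := prometheus.NewRegistry()
        sdMetrics, err := discovery.CreateAndRegisterSDMetrics(reg)
        if err != nil {
            panic(err)
        }
        mgr := discovery.NewManager(context.Background(), promslog.NewNopLogger(), reg, sdMetrics)
        if mgr == nil { // NewManager returns nil if its own metrics fail to register
            panic("discovery manager metrics registration failed")
        }
        go mgr.Run()
    }
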
@@ -22,10 +22,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -675,7 +675,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 
@@ -791,7 +791,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -828,7 +828,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -868,7 +868,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -911,7 +911,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -979,7 +979,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1023,7 +1023,7 @@ func TestDiscovererConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1060,7 +1060,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1141,7 +1141,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1202,7 +1202,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1454,7 +1454,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1551,7 +1551,7 @@ func TestUnregisterMetrics(t *testing.T) {
 
 	refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	// discoveryManager will be nil if there was an error configuring metrics.
 	require.NotNil(t, discoveryManager)
 	// Unregister all metrics.
@@ -19,6 +19,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"math/rand"
 	"net"
 	"net/http"
@@ -27,7 +28,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -140,7 +140,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Marathon Discovery.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*marathonMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -16,6 +16,7 @@ package moby
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
@@ -28,7 +29,6 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -128,7 +128,7 @@ type DockerDiscovery struct {
 }
 
 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
+func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
 	m, ok := metrics.(*dockerMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,9 +19,9 @@ import (
 	"sort"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -226,7 +226,7 @@ host: %s
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
-	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -16,13 +16,13 @@ package moby
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"time"
 
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -125,7 +125,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*dockerswarmMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -349,7 +349,7 @@ filters:
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -17,12 +17,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	nomad "github.com/hashicorp/nomad/api"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -121,7 +121,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*nomadMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -21,9 +21,9 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -160,7 +160,7 @@ func TestNomadSDRefresh(t *testing.T) {
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	tgs, err := d.refresh(context.Background())
@@ -16,10 +16,10 @@ package openstack
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
@@ -43,14 +43,14 @@ type HypervisorDiscovery struct {
 	provider     *gophercloud.ProviderClient
 	authOpts     *gophercloud.AuthOptions
 	region       string
-	logger       log.Logger
+	logger       *slog.Logger
 	port         int
 	availability gophercloud.Availability
 }
 
 // newHypervisorDiscovery returns a new hypervisor discovery.
 func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
-	port int, region string, availability gophercloud.Availability, l log.Logger,
+	port int, region string, availability gophercloud.Availability, l *slog.Logger,
 ) *HypervisorDiscovery {
 	return &HypervisorDiscovery{
 		provider: provider, authOpts: opts,
@@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 	_, err := hypervisor.refresh(ctx)
-	require.Error(t, err)
-	require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
+	require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
 }
@@ -16,17 +16,17 @@ package openstack
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
 	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
 	"github.com/gophercloud/gophercloud/pagination"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/prometheus/prometheus/util/strutil"
@@ -52,7 +52,7 @@ type InstanceDiscovery struct {
 	provider     *gophercloud.ProviderClient
 	authOpts     *gophercloud.AuthOptions
 	region       string
-	logger       log.Logger
+	logger       *slog.Logger
 	port         int
 	allTenants   bool
 	availability gophercloud.Availability
@@ -60,10 +60,10 @@ type InstanceDiscovery struct {
 
 // NewInstanceDiscovery returns a new instance discovery.
 func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
-	port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger,
+	port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger,
 ) *InstanceDiscovery {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 	return &InstanceDiscovery{
 		provider: provider, authOpts: opts,
@@ -134,7 +134,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 
 	for _, s := range instanceList {
 		if len(s.Addresses) == 0 {
-			level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
+			i.logger.Info("Got no IP address", "instance", s.ID)
 			continue
 		}
 
@@ -151,7 +151,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 		if !nameOk {
 			flavorID, idOk := s.Flavor["id"].(string)
 			if !idOk {
-				level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string")
+				i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string")
 				continue
 			}
 			labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
@@ -171,22 +171,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 		for pool, address := range s.Addresses {
 			md, ok := address.([]interface{})
 			if !ok {
-				level.Warn(i.logger).Log("msg", "Invalid type for address, expected array")
+				i.logger.Warn("Invalid type for address, expected array")
 				continue
 			}
 			if len(md) == 0 {
-				level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
+				i.logger.Debug("Got no IP address", "instance", s.ID)
 				continue
 			}
 			for _, address := range md {
 				md1, ok := address.(map[string]interface{})
 				if !ok {
-					level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict")
+					i.logger.Warn("Invalid type for address, expected dict")
 					continue
 				}
 				addr, ok := md1["addr"].(string)
 				if !ok {
-					level.Warn(i.logger).Log("msg", "Invalid type for address, expected string")
+					i.logger.Warn("Invalid type for address, expected string")
 					continue
 				}
 				if _, ok := floatingIPPresent[addr]; ok {
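The OpenStack hunks above repeat the same mechanical rewrite at every level: `level.Info/Warn/Debug(l).Log("msg", m, kv...)` becomes `l.Info/Warn/Debug(m, kv...)`. One behavioral detail worth noting is that slog filters by level in the handler, so a sketch like the following (illustrative values, not code from this diff) needs `HandlerOptions` for `Debug` records to appear:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	// Enable Debug output explicitly; the default handler level is Info.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	// Was: level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict")
	logger.Warn("Invalid type for address, expected dict")

	// Was: level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
	logger.Debug("Got no IP address", "instance", "openstack-1234")
}
```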
@@ -134,6 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 	_, err := hypervisor.refresh(ctx)
-	require.Error(t, err)
-	require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
+	require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
 }
@@ -17,10 +17,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/gophercloud/gophercloud"
 	"github.com/gophercloud/gophercloud/openstack"
 	"github.com/mwitkow/go-conntrack"
@@ -142,7 +142,7 @@ type refresher interface {
 }
 
 // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*openstackMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -163,7 +163,7 @@ func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetr
 	), nil
 }
 
-func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
+func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
 	var opts gophercloud.AuthOptions
 	if conf.IdentityEndpoint == "" {
 		var err error
@@ -16,13 +16,12 @@ package ovhcloud
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/netip"
 	"net/url"
 	"path"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/ovh/go-ovh/ovh"
 	"github.com/prometheus/common/model"
 
@@ -55,10 +54,10 @@ type dedicatedServer struct {
 type dedicatedServerDiscovery struct {
 	*refresh.Discovery
 	config *SDConfig
-	logger log.Logger
+	logger *slog.Logger
 }
 
-func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery {
+func newDedicatedServerDiscovery(conf *SDConfig, logger *slog.Logger) *dedicatedServerDiscovery {
 	return &dedicatedServerDiscovery{config: conf, logger: logger}
 }
 
@@ -115,10 +114,7 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
 	for _, dedicatedServerName := range dedicatedServerList {
 		dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName)
 		if err != nil {
-			err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error())
-			if err != nil {
-				return nil, err
-			}
+			d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error())
 			continue
 		}
 		dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer)
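Beyond the type swap, the OVHcloud hunk above deletes real logic: go-kit's `Log` returns an `error`, and the old code shadowed `err` and aborted the whole refresh if the log write itself failed. `slog` methods return nothing, so the nested check disappears. A sketch of the before/after shape (hypothetical `fetchDetails` helper, not the real API call):

```go
package main

import (
	"errors"
	"fmt"
	"log/slog"
	"os"
)

// fetchDetails stands in for getDedicatedServerDetails; hypothetical helper.
func fetchDetails(name string) (string, error) {
	return "", errors.New("api timeout")
}

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	for _, name := range []string{"server-a", "server-b"} {
		if _, err := fetchDetails(name); err != nil {
			// slog has no error return, so the old pattern
			//   if err := level.Warn(l).Log(...); err != nil { return nil, err }
			// collapses to a single call followed by continue.
			logger.Warn(fmt.Sprintf("could not get details of %s", name), "err", err.Error())
			continue
		}
	}
}
```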
@@ -21,8 +21,8 @@ import (
 	"os"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 )
@@ -41,7 +41,7 @@ application_secret: %s
 consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest)
 
 	require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))
-	d, err := newRefresher(&cfg, log.NewNopLogger())
+	d, err := newRefresher(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 	ctx := context.Background()
 	targetGroups, err := d.refresh(ctx)
@@ -17,10 +17,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/netip"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/ovh/go-ovh/ovh"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -137,7 +137,7 @@ func parseIPList(ipList []string) ([]netip.Addr, error) {
 	return ipAddresses, nil
 }
 
-func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
+func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
 	switch conf.Service {
 	case "vps":
 		return newVpsDiscovery(conf, logger), nil
@@ -148,7 +148,7 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
 }
 
 // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*ovhcloudMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -20,11 +20,11 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
-	"github.com/prometheus/prometheus/util/testutil"
 )
 
 var (
@@ -121,7 +121,7 @@ func TestParseIPs(t *testing.T) {
 
 func TestDiscoverer(t *testing.T) {
 	conf, _ := getMockConf("vps")
-	logger := testutil.NewLogger(t)
+	logger := promslog.NewNopLogger()
 
 	reg := prometheus.NewRegistry()
 	refreshMetrics := discovery.NewRefreshMetrics(reg)
@@ -16,13 +16,12 @@ package ovhcloud
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/netip"
 	"net/url"
 	"path"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/ovh/go-ovh/ovh"
 	"github.com/prometheus/common/model"
 
@@ -68,10 +67,10 @@ type virtualPrivateServer struct {
 type vpsDiscovery struct {
 	*refresh.Discovery
 	config *SDConfig
-	logger log.Logger
+	logger *slog.Logger
 }
 
-func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery {
+func newVpsDiscovery(conf *SDConfig, logger *slog.Logger) *vpsDiscovery {
 	return &vpsDiscovery{config: conf, logger: logger}
 }
 
@@ -133,10 +132,7 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	for _, vpsName := range vpsList {
 		vpsDetailed, err := getVpsDetails(client, vpsName)
 		if err != nil {
-			err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error())
-			if err != nil {
-				return nil, err
-			}
+			d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error())
 			continue
 		}
 		vpsDetailedList = append(vpsDetailedList, *vpsDetailed)
@@ -23,8 +23,8 @@ import (
 
 	yaml "gopkg.in/yaml.v2"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 )
 
@@ -43,7 +43,7 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr
 
 	require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))
 
-	d, err := newRefresher(&cfg, log.NewNopLogger())
+	d, err := newRefresher(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 	ctx := context.Background()
 	targetGroups, err := d.refresh(ctx)
@@ -19,6 +19,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
@@ -27,11 +28,11 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -138,14 +139,14 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new PuppetDB discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*puppetdbMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")
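Constructors that previously defaulted a nil logger with `log.NewNopLogger()` now use `promslog.NewNopLogger()`, which returns a `*slog.Logger` that discards every record. A minimal sketch of the guard (hypothetical `Discovery` type, but the real `promslog` helper used throughout this commit):

```go
package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// Discovery is a hypothetical stand-in for the SD types in this commit.
type Discovery struct {
	logger *slog.Logger
}

func NewDiscovery(logger *slog.Logger) *Discovery {
	if logger == nil {
		// A nop *slog.Logger: safe to call, writes nowhere.
		logger = promslog.NewNopLogger()
	}
	return &Discovery{logger: logger}
}

func main() {
	d := NewDiscovery(nil)
	d.logger.Info("this record is silently dropped")
}
```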
@@ -22,10 +22,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -70,7 +70,7 @@ func TestPuppetSlashInURL(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	require.Equal(t, apiURL, d.url)
 
@@ -94,7 +94,7 @@ func TestPuppetDBRefresh(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -142,7 +142,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -201,7 +201,7 @@ func TestPuppetDBInvalidCode(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -229,7 +229,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -16,17 +16,17 @@ package refresh
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
 type Options struct {
-	Logger              log.Logger
+	Logger              *slog.Logger
 	Mech                string
 	Interval            time.Duration
 	RefreshF            func(ctx context.Context) ([]*targetgroup.Group, error)
@@ -35,7 +35,7 @@ type Options struct {
 
 // Discovery implements the Discoverer interface.
 type Discovery struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	interval time.Duration
 	refreshf func(ctx context.Context) ([]*targetgroup.Group, error)
 	metrics  *discovery.RefreshMetrics
@@ -45,9 +45,9 @@ type Discovery struct {
 func NewDiscovery(opts Options) *Discovery {
 	m := opts.MetricsInstantiator.Instantiate(opts.Mech)
 
-	var logger log.Logger
+	var logger *slog.Logger
 	if opts.Logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	} else {
 		logger = opts.Logger
 	}
@@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	tgs, err := d.refresh(ctx)
 	if err != nil {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+			d.logger.Error("Unable to refresh target groups", "err", err.Error())
 		}
 	} else {
 		select {
@@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	tgs, err := d.refresh(ctx)
 	if err != nil {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+			d.logger.Error("Unable to refresh target groups", "err", err.Error())
 		}
 		continue
 	}
@@ -17,12 +17,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"os"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -185,7 +185,7 @@ func init() {
 // the Discoverer interface.
 type Discovery struct{}
 
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*scalewayMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,12 +19,12 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/mwitkow/go-conntrack"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -146,7 +146,7 @@ type Discovery struct {
 }
 
 // New returns a new Discovery which periodically refreshes its targets.
-func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*tritonMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -21,7 +21,6 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"strconv"
-	"strings"
 	"testing"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
 	td, m, _ := newTritonDiscovery(conf)
 
 	_, err := td.refresh(context.Background())
-	require.Error(t, err)
-	require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"))
+	require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint")
 	m.Unregister()
 }
 
@@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 	_, err := td.refresh(ctx)
-	require.Error(t, err)
-	require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
+	require.ErrorContains(t, err, context.Canceled.Error())
 	m.Unregister()
 }
 
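The test hunks piggyback a second cleanup: two-step error assertions become testify's dedicated helpers. `require.ErrorContains` implies `require.Error`, and `require.EqualError` replaces comparing `err.Error()` by hand, which also lets the `strings` import go. A small sketch (hypothetical test, real testify API):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorHelpers(t *testing.T) {
	err := errors.New("an error occurred when requesting targets")

	// Before: require.Error(t, err)
	//         require.True(t, strings.Contains(err.Error(), "requesting targets"))
	// After: one helper that fails when err is nil or lacks the substring.
	require.ErrorContains(t, err, "requesting targets")

	// When the exact message matters, compare it directly.
	require.EqualError(t, err, "an error occurred when requesting targets")
}
```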
@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"path"
@@ -24,7 +25,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/kolo/xmlrpc"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -109,7 +109,7 @@ type Discovery struct {
 	entitlement string
 	separator   string
 	interval    time.Duration
-	logger      log.Logger
+	logger      *slog.Logger
 }
 
 // NewDiscovererMetrics implements discovery.Config.
@@ -212,7 +212,7 @@ func getEndpointInfoForSystems(
 }
 
 // NewDiscovery returns a uyuni discovery for the given configuration.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*uyuniMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -16,13 +16,13 @@ package vultr
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -114,7 +114,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*vultrMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,9 +19,9 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -57,7 +57,7 @@ func TestVultrSDRefresh(t *testing.T) {
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdMock.Mock.Endpoint())
 	require.NoError(t, err)
@@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) {
 	endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("127.0.0.1"), "monitoring")
 
 	require.Empty(t, endpointURL)
-	require.Error(t, err)
-	require.Equal(t, "invalid xDS server URL", err.Error())
+	require.EqualError(t, err, "invalid xDS server URL")
 }
 
 func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) {
 	endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring")
 
 	require.Empty(t, endpointURL)
-	require.Error(t, err)
-	require.Contains(t, err.Error(), "must be either 'http' or 'https'")
+	require.ErrorContains(t, err, "must be either 'http' or 'https'")
 }
 
 func TestMakeXDSResourceHttpEndpoint(t *testing.T) {
@ -15,14 +15,14 @@ package xds
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"net/url"
|
"net/url"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/go-kit/log"
|
|
||||||
"github.com/go-kit/log/level"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/common/config"
|
"github.com/prometheus/common/config"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"github.com/prometheus/common/promslog"
|
||||||
"google.golang.org/protobuf/types/known/anypb"
|
"google.golang.org/protobuf/types/known/anypb"
|
||||||
|
|
||||||
"github.com/prometheus/prometheus/discovery"
|
"github.com/prometheus/prometheus/discovery"
|
||||||
|
@@ -99,7 +99,7 @@ func (c *KumaSDConfig) SetDirectory(dir string) {
 func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
 	logger := opts.Logger
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	return NewKumaHTTPDiscovery(c, logger, opts.Metrics)
@@ -158,7 +158,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
 	return targets, nil
 }
 
-func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
+func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
 	m, ok := metrics.(*xdsMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
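The two hunks above show the migration pattern for discoverer constructors: the exported signature now takes a *slog.Logger, and the caller substitutes a nop logger when none is configured. A condensed sketch of the pattern, with hypothetical names (Component, NewComponent) standing in for the real discoverer types:

package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// Component is a stand-in for a discoverer that now stores a *slog.Logger
// instead of go-kit's log.Logger interface.
type Component struct {
	logger *slog.Logger
}

func NewComponent(logger *slog.Logger) *Component {
	if logger == nil {
		// Same guard as in NewDiscoverer above: never keep a nil logger.
		logger = promslog.NewNopLogger()
	}
	return &Component{logger: logger}
}

func main() {
	c := NewComponent(nil)
	c.logger.Info("component ready")
}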
@@ -170,7 +170,7 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discove
 		var err error
 		clientID, err = osutil.GetFQDN()
 		if err != nil {
-			level.Debug(logger).Log("msg", "error getting FQDN", "err", err)
+			logger.Debug("error getting FQDN", "err", err)
 			clientID = "prometheus"
 		}
 	}
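A minimal sketch of the call-site mapping this commit applies throughout, assuming only the two logging libraries involved; the string values are illustrative:

package main

import (
	"os"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/prometheus/common/promslog"
)

func main() {
	// Before: go-kit logging, with levels bolted on via the level package
	// and the message passed as an ordinary "msg" key/value pair.
	gk := log.NewLogfmtLogger(os.Stderr)
	level.Debug(gk).Log("msg", "error getting FQDN", "err", "lookup failed")

	// After: a standard-library *slog.Logger built by promslog. The level
	// is a method, the message is the first argument, and key/value pairs
	// follow, so the explicit "msg" key goes away. (promslog defaults to
	// info, so this Debug record is dropped unless the level is lowered.)
	sl := promslog.New(&promslog.Config{})
	sl.Debug("error getting FQDN", "err", "lookup failed")
}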
@@ -23,13 +23,14 @@ package xds
 
 import (
 	context "context"
+	reflect "reflect"
+	sync "sync"
+
 	v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
 	_ "github.com/envoyproxy/protoc-gen-validate/validate"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	reflect "reflect"
-	sync "sync"
 )
 
 const (
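The generated-code hunk above only regroups imports: the standard-library packages (reflect, sync) move into the first group, matching the layout goimports produces. A sketch of the convention, with placeholder usage so the file compiles:

package main

import (
	// Standard library first...
	"fmt"
	"sync"

	// ...then third-party modules after a blank line (goimports layout).
	"github.com/prometheus/common/promslog"
)

func main() {
	var once sync.Once
	once.Do(func() { fmt.Println("imports grouped") })
	promslog.NewNopLogger().Info("done")
}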
@@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) {
 	}}
 	groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL)
 	require.Nil(t, groups)
-	require.Error(t, err)
 
-	require.Contains(t, err.Error(), "cannot parse")
+	require.ErrorContains(t, err, "cannot parse")
 }
 
 func TestNewKumaHTTPDiscovery(t *testing.T) {
@@ -15,11 +15,10 @@ package xds
 
 import (
 	"context"
+	"log/slog"
 	"time"
 
 	v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"google.golang.org/protobuf/encoding/protojson"
@@ -104,7 +103,7 @@ type fetchDiscovery struct {
 	refreshInterval time.Duration
 
 	parseResources resourceParser
-	logger         log.Logger
+	logger         *slog.Logger
 
 	metrics *xdsMetrics
 }
@@ -140,7 +139,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou
 	}
 
 	if err != nil {
-		level.Error(d.logger).Log("msg", "error parsing resources", "err", err)
+		d.logger.Error("error parsing resources", "err", err)
 		d.metrics.fetchFailuresCount.Inc()
 		return
 	}
@@ -153,12 +152,12 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou
 
 	parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl)
 	if err != nil {
-		level.Error(d.logger).Log("msg", "error parsing resources", "err", err)
+		d.logger.Error("error parsing resources", "err", err)
 		d.metrics.fetchFailuresCount.Inc()
 		return
 	}
 
-	level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets))
+	d.logger.Debug("Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets))
 
 	select {
 	case <-ctx.Done():
Some files were not shown because too many files have changed in this diff.