Mirror of https://github.com/prometheus/prometheus.git (synced 2025-03-05 20:59:13 -08:00)

Merge branch 'main' of https://github.com/prometheus/prometheus into makefile-check-runtime-versions-target

commit db31ec6863
.github/workflows/buf-lint.yml (vendored): 2 changes

@@ -12,7 +12,7 @@ jobs:
     name: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/buf.yml (vendored): 2 changes

@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'prometheus'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: bufbuild/buf-setup-action@62ee92603c244ad0da98bab36a834a999a5329e6 # v1.43.0
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/ci.yml (vendored): 26 changes

@@ -13,7 +13,7 @@ jobs:
       # should also be updated.
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:

@@ -29,7 +29,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
       - run: go test --tags=dedupelabels ./...

@@ -48,7 +48,7 @@ jobs:
       # The go version in this image should be N-1 wrt test_go.
       image: quay.io/prometheus/golang-builder:1.22-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: make build
       # Don't run NPM build; don't run race-detector.
       - run: make test GO_ONLY=1 test-flags=""

@@ -62,7 +62,7 @@ jobs:
       image: quay.io/prometheus/golang-builder:1.23-base

     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/setup_environment
         with:

@@ -79,7 +79,7 @@ jobs:
     name: Go tests on Windows
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:
           go-version: 1.23.x

@@ -96,7 +96,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder:1.23-base
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: go install ./cmd/promtool/.
       - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
       - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest

@@ -121,7 +121,7 @@ jobs:
       matrix:
         thread: [ 0, 1, 2 ]
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:

@@ -146,7 +146,7 @@ jobs:
       # Whenever the Go version is updated here, .promu.yml
       # should also be updated.
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/build
         with:

@@ -169,7 +169,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:

@@ -182,7 +182,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Install Go
         uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
         with:

@@ -208,7 +208,7 @@ jobs:
     needs: [test_ui, test_go, test_go_more, test_go_oldest, test_windows, golangci, codeql, build_all]
     if: github.event_name == 'push' && github.event.ref == 'refs/heads/main'
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_main
         with:

@@ -225,7 +225,7 @@ jobs:
       ||
       (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v3.'))
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - uses: ./.github/promci/actions/publish_release
         with:

@@ -240,7 +240,7 @@ jobs:
     needs: [test_ui, codeql]
     steps:
       - name: Checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - uses: prometheus/promci@468927c440349ab56c4a1aafd453b312841503c2 # v0.4.4
       - name: Install nodejs
         uses: actions/setup-node@0a44ba7841725637a19e28fa30b79a866c81b0a6 # v4.0.4

.github/workflows/codeql-analysis.yml (vendored): 2 changes

@@ -24,7 +24,7 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0

       - name: Initialize CodeQL
         uses: github/codeql-action/init@e2b3eafc8d227b0241d48be5f425d47c2d750a13 # v3.26.10

.github/workflows/container_description.yml (vendored): 4 changes

@@ -18,7 +18,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Set docker hub repo name
         run: echo "DOCKER_REPO_NAME=$(make docker-repo-name)" >> $GITHUB_ENV
       - name: Push README to Dockerhub

@@ -40,7 +40,7 @@ jobs:
     if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
     steps:
       - name: git checkout
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - name: Set quay.io org name
         run: echo "DOCKER_REPO=$(echo quay.io/${GITHUB_REPOSITORY_OWNER} | tr -d '-')" >> $GITHUB_ENV
       - name: Set quay.io repo name

.github/workflows/repo_sync.yml (vendored): 2 changes

@@ -13,7 +13,7 @@ jobs:
     container:
       image: quay.io/prometheus/golang-builder
     steps:
-      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+      - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
       - run: ./scripts/sync_repo_files.sh
         env:
           GITHUB_TOKEN: ${{ secrets.PROMBOT_GITHUB_TOKEN }}

.github/workflows/scorecards.yml (vendored): 2 changes

@@ -21,7 +21,7 @@ jobs:

     steps:
       - name: "Checkout code"
-        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # tag=v4.1.6
+        uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # tag=v4.2.0
         with:
           persist-credentials: false

.golangci.yml

@@ -23,6 +23,7 @@ linters:
     - usestdlibvars
     - whitespace
     - loggercheck
+    - sloglint

 issues:
   max-issues-per-linter: 0

@@ -100,8 +101,6 @@ linters-settings:
       - (net/http.ResponseWriter).Write
       # No need to check for errors on server's shutdown.
       - (*net/http.Server).Shutdown
-      # Never check for logger errors.
-      - (github.com/go-kit/log.Logger).Log
       # Never check for rollback errors as Rollback() is called when a previous error was detected.
       - (github.com/prometheus/prometheus/storage.Appender).Rollback
   goimports:

@@ -153,14 +152,4 @@ linters-settings:
     disable:
      - float-compare
      - go-require
-    enable:
-      - bool-compare
-      - compares
-      - empty
-      - error-is-as
-      - error-nil
-      - expected-actual
-      - len
-      - require-error
-      - suite-dont-use-pkg
-      - suite-extra-assert-call
+    enable-all: true

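sloglint joins loggercheck here to police the log/slog call sites that the rest of this commit introduces; among other things it rejects dangling or mixed key/value arguments. An illustrative sketch (not from this commit) of a call it accepts and one it would flag:

package main

import (
    "log/slog"
    "os"
)

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

    // OK: message first, then complete key/value pairs.
    logger.Info("config reloaded", "file", "/etc/prometheus/prometheus.yml", "durationSeconds", 0.42)

    // Would be flagged (and slog itself emits a !BADKEY attribute at runtime):
    // a trailing key with no value.
    // logger.Info("config reloaded", "file")
}
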
cmd/prometheus/main.go

@@ -18,6 +18,7 @@ import (
     "context"
     "errors"
     "fmt"
+    "log/slog"
     "math"
     "math/bits"
     "net"

@@ -37,8 +38,6 @@ import (
     "github.com/KimMachineGun/automemlimit/memlimit"
     "github.com/alecthomas/kingpin/v2"
     "github.com/alecthomas/units"
-    "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
     "github.com/grafana/regexp"
     "github.com/mwitkow/go-conntrack"
     "github.com/oklog/run"

@@ -46,8 +45,8 @@ import (
     "github.com/prometheus/client_golang/prometheus/collectors"
     versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
     "github.com/prometheus/common/model"
-    "github.com/prometheus/common/promlog"
-    promlogflag "github.com/prometheus/common/promlog/flag"
+    "github.com/prometheus/common/promslog"
+    promslogflag "github.com/prometheus/common/promslog/flag"
     "github.com/prometheus/common/version"
     toolkit_web "github.com/prometheus/exporter-toolkit/web"
     "go.uber.org/atomic"

@@ -81,6 +80,45 @@ import (
     "github.com/prometheus/prometheus/web"
 )

+// klogv1OutputCallDepth is the stack depth where we can find the origin of this call.
+const klogv1OutputCallDepth = 6
+
+// klogv1DefaultPrefixLength is the length of the log prefix that we have to strip out.
+const klogv1DefaultPrefixLength = 53
+
+// klogv1Writer is used in SetOutputBySeverity call below to redirect any calls
+// to klogv1 to end up in klogv2.
+// This is a hack to support klogv1 without use of go-kit/log. It is inspired
+// by klog's upstream klogv1/v2 coexistence example:
+// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go
+type klogv1Writer struct{}
+
+// Write redirects klogv1 calls to klogv2.
+// This is a hack to support klogv1 without use of go-kit/log. It is inspired
+// by klog's upstream klogv1/v2 coexistence example:
+// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go
+func (kw klogv1Writer) Write(p []byte) (n int, err error) {
+    if len(p) < klogv1DefaultPrefixLength {
+        klogv2.InfoDepth(klogv1OutputCallDepth, string(p))
+        return len(p), nil
+    }
+
+    switch p[0] {
+    case 'I':
+        klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+    case 'W':
+        klogv2.WarningDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+    case 'E':
+        klogv2.ErrorDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+    case 'F':
+        klogv2.FatalDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+    default:
+        klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
+    }
+
+    return len(p), nil
+}
+
 var (
     appName = "prometheus"

@@ -171,82 +209,82 @@ type flagConfig struct {
     prometheusURL   string
     corsRegexString string

-    promlogConfig promlog.Config

     promqlEnableDelayedNameRemoval bool

+    promslogConfig promslog.Config
 }

 // setFeatureListOptions sets the corresponding options from the featureList.
-func (c *flagConfig) setFeatureListOptions(logger log.Logger) error {
+func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
     for _, f := range c.featureList {
         opts := strings.Split(f, ",")
         for _, o := range opts {
             switch o {
             case "expand-external-labels":
                 c.enableExpandExternalLabels = true
-                level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
+                logger.Info("Experimental expand-external-labels enabled")
             case "exemplar-storage":
                 c.tsdb.EnableExemplarStorage = true
-                level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled")
+                logger.Info("Experimental in-memory exemplar storage enabled")
             case "memory-snapshot-on-shutdown":
                 c.tsdb.EnableMemorySnapshotOnShutdown = true
-                level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled")
+                logger.Info("Experimental memory snapshot on shutdown enabled")
             case "extra-scrape-metrics":
                 c.scrape.ExtraMetrics = true
-                level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled")
+                logger.Info("Experimental additional scrape metrics enabled")
             case "metadata-wal-records":
                 c.scrape.AppendMetadata = true
-                level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0")
+                logger.Info("Experimental metadata records in WAL enabled, required for remote write 2.0")
             case "promql-per-step-stats":
                 c.enablePerStepStats = true
-                level.Info(logger).Log("msg", "Experimental per-step statistics reporting")
+                logger.Info("Experimental per-step statistics reporting")
             case "auto-gomaxprocs":
                 c.enableAutoGOMAXPROCS = true
-                level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota")
+                logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota")
             case "auto-reload-config":
                 c.enableAutoReload = true
                 if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
                     c.autoReloadInterval, _ = model.ParseDuration("1s")
                 }
-                level.Info(logger).Log("msg", fmt.Sprintf("Enabled automatic configuration file reloading. Checking for configuration changes every %s.", c.autoReloadInterval))
+                logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
             case "auto-gomemlimit":
                 c.enableAutoGOMEMLIMIT = true
-                level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit")
+                logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
             case "concurrent-rule-eval":
                 c.enableConcurrentRuleEval = true
-                level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.")
+                logger.Info("Experimental concurrent rule evaluation enabled.")
             case "promql-experimental-functions":
                 parser.EnableExperimentalFunctions = true
-                level.Info(logger).Log("msg", "Experimental PromQL functions enabled.")
+                logger.Info("Experimental PromQL functions enabled.")
             case "native-histograms":
                 c.tsdb.EnableNativeHistograms = true
                 c.scrape.EnableNativeHistogramsIngestion = true
                 // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
                 config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
                 config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
-                level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+                logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
             case "ooo-native-histograms":
                 c.tsdb.EnableOOONativeHistograms = true
-                level.Info(logger).Log("msg", "Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
+                logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
             case "created-timestamp-zero-ingestion":
                 c.scrape.EnableCreatedTimestampZeroIngestion = true
                 // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
                 config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
                 config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
-                level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
+                logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
             case "delayed-compaction":
                 c.tsdb.EnableDelayedCompaction = true
-                level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.")
+                logger.Info("Experimental delayed compaction is enabled.")
             case "promql-delayed-name-removal":
                 c.promqlEnableDelayedNameRemoval = true
-                level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.")
+                logger.Info("Experimental PromQL delayed name removal enabled.")
             case "":
                 continue
             case "old-ui":
                 c.web.UseOldUI = true
-                level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.")
+                logger.Info("Serving previous version of the Prometheus web UI.")
             default:
-                level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
+                logger.Warn("Unknown option for --enable-feature", "option", o)
             }
         }
     }

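The hunk above is the template for the whole migration: every go-kit level.X(logger).Log("msg", ...) pair collapses into a single method call on a *slog.Logger, with the message as the first positional argument followed by key/value pairs. A minimal sketch of the two shapes, assuming only the promslog package already imported in this file:

package main

import "github.com/prometheus/common/promslog"

func main() {
    // promslog.New returns a *slog.Logger (logfmt to stderr by default).
    logger := promslog.New(&promslog.Config{})

    // go-kit style (removed):  level.Info(logger).Log("msg", "feature enabled", "option", o)
    // slog style (added): message first, then key/value pairs.
    logger.Info("feature enabled", "option", "exemplar-storage")
    logger.Warn("Unknown option for --enable-feature", "option", "bogus")
}
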
@@ -280,7 +318,7 @@ func main() {
             Registerer: prometheus.DefaultRegisterer,
             Gatherer:   prometheus.DefaultGatherer,
         },
-        promlogConfig: promlog.Config{},
+        promslogConfig: promslog.Config{},
     }

     a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)

@@ -384,6 +422,9 @@ func main() {
     serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
         Default("false").BoolVar(&cfg.tsdb.NoLockfile)

+    serverOnlyFlag(a, "storage.tsdb.allow-overlapping-compaction", "Allow compaction of overlapping blocks. If set to false, TSDB stops vertical compaction and leaves overlapping blocks there. The use case is to let another component handle the compaction of overlapping blocks.").
+        Default("true").Hidden().BoolVar(&cfg.tsdb.EnableOverlappingCompaction)
+
     serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
         Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)

@@ -480,7 +521,7 @@ func main() {

     a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)

-    promlogflag.AddFlags(a, &cfg.promlogConfig)
+    promslogflag.AddFlags(a, &cfg.promslogConfig)

     a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
         if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil {

@@ -498,7 +539,8 @@ func main() {
         os.Exit(2)
     }

-    logger := promlog.New(&cfg.promlogConfig)
+    logger := promslog.New(&cfg.promslogConfig)
+    slog.SetDefault(logger)

     notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer)
     cfg.web.NotificationsSub = notifs.Sub

@@ -553,12 +595,12 @@ func main() {

     // Throw error for invalid config before starting other components.
     var cfgFile *config.Config
-    if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
+    if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil {
         absPath, pathErr := filepath.Abs(cfg.configFile)
         if pathErr != nil {
             absPath = cfg.configFile
         }
-        level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
+        logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
         os.Exit(2)
     }
     if _, err := cfgFile.GetScrapeConfigs(); err != nil {

@@ -566,7 +608,7 @@ func main() {
         if pathErr != nil {
             absPath = cfg.configFile
         }
-        level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
+        logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
         os.Exit(2)
     }
     if cfg.tsdb.EnableExemplarStorage {

@@ -599,7 +641,7 @@ func main() {
     if !agentMode {
         if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
             cfg.tsdb.RetentionDuration = defaultRetentionDuration
-            level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
+            logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
         }

         // Check for overflows. This limits our max retention to 100y.

@@ -609,7 +651,7 @@ func main() {
                 panic(err)
             }
             cfg.tsdb.RetentionDuration = y
-            level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String())
+            logger.Warn("Time retention value is too high. Limiting to: " + y.String())
         }

         // Max block size settings.

@@ -630,11 +672,8 @@ func main() {
     noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
     noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval)

-    // Above level 6, the k8s client would log bearer tokens in clear-text.
-    klog.ClampLevel(6)
-    klog.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
-    klogv2.ClampLevel(6)
-    klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
+    klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime"))
+    klog.SetOutputBySeverity("INFO", klogv1Writer{})

     modeAppName := "Prometheus Server"
     mode := "server"

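Two calls now replace the five removed lines: klog v2 receives the slog logger directly, while klog v1 (which only exposes io.Writer hooks) is funneled through the klogv1Writer defined near the top of this file. A sketch of the wiring, with a stub writer standing in for klogv1Writer:

package main

import (
    "log/slog"
    "os"

    klog "k8s.io/klog"
    klogv2 "k8s.io/klog/v2"
)

// stubV1Writer stands in for the klogv1Writer added in this commit; the real
// adapter strips the fixed-width klog v1 header and re-dispatches on the
// leading 'I'/'W'/'E'/'F' severity byte via the klogv2.*Depth calls.
type stubV1Writer struct{}

func (stubV1Writer) Write(p []byte) (int, error) { return len(p), nil }

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
    klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime"))
    // Routing every v1 severity through the INFO output is intentional: the
    // adapter recovers the real severity from the message prefix.
    klog.SetOutputBySeverity("INFO", stubV1Writer{})
}
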
@@ -643,20 +682,22 @@ func main() {
         mode = "agent"
     }

-    level.Info(logger).Log("msg", "Starting "+modeAppName, "mode", mode, "version", version.Info())
+    logger.Info("Starting "+modeAppName, "mode", mode, "version", version.Info())
     if bits.UintSize < 64 {
-        level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
+        logger.Warn("This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
     }

-    level.Info(logger).Log("build_context", version.BuildContext())
-    level.Info(logger).Log("host_details", prom_runtime.Uname())
-    level.Info(logger).Log("fd_limits", prom_runtime.FdLimits())
-    level.Info(logger).Log("vm_limits", prom_runtime.VMLimits())
+    logger.Info("operational information",
+        "build_context", version.BuildContext(),
+        "host_details", prom_runtime.Uname(),
+        "fd_limits", prom_runtime.FdLimits(),
+        "vm_limits", prom_runtime.VMLimits(),
+    )

     var (
         localStorage  = &readyStorage{stats: tsdb.NewDBStats()}
         scraper       = &readyScrapeManager{}
-        remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
+        remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
         fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
     )

@@ -664,7 +705,7 @@ func main() {
         ctxWeb, cancelWeb = context.WithCancel(context.Background())
         ctxRule           = context.Background()

-        notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier"))
+        notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier"))

         ctxScrape, cancelScrape = context.WithCancel(context.Background())
         ctxNotify, cancelNotify = context.WithCancel(context.Background())

@@ -679,37 +720,37 @@ func main() {
     // they are not specific to an SD instance.
     err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer)
     if err != nil {
-        level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err)
+        logger.Error("failed to register Kubernetes client metrics", "err", err)
         os.Exit(1)
     }

     sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
     if err != nil {
-        level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
+        logger.Error("failed to register service discovery metrics", "err", err)
         os.Exit(1)
     }

-    discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
+    discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
     if discoveryManagerScrape == nil {
-        level.Error(logger).Log("msg", "failed to create a discovery manager scrape")
+        logger.Error("failed to create a discovery manager scrape")
         os.Exit(1)
     }

-    discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
+    discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
     if discoveryManagerNotify == nil {
-        level.Error(logger).Log("msg", "failed to create a discovery manager notify")
+        logger.Error("failed to create a discovery manager notify")
         os.Exit(1)
     }

     scrapeManager, err := scrape.NewManager(
         &cfg.scrape,
-        log.With(logger, "component", "scrape manager"),
-        func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) },
+        logger.With("component", "scrape manager"),
+        logging.NewJSONFileLogger,
         fanoutStorage,
         prometheus.DefaultRegisterer,
     )
     if err != nil {
-        level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err)
+        logger.Error("failed to create a scrape manager", "err", err)
         os.Exit(1)
     }

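A small cleanup rides along in the scrape.NewManager call above: the wrapper closure func(s string) (log.Logger, error) {...} becomes the bare function value logging.NewJSONFileLogger, whose new signature matches the parameter type directly. A generic sketch of the idiom, with hypothetical types standing in for the real ones:

package main

import "fmt"

// newSink is a stand-in for logging.NewJSONFileLogger: a constructor whose
// signature already matches the expected callback type.
func newSink(path string) (string, error) { return "sink:" + path, nil }

// register accepts any constructor of the right shape.
func register(open func(string) (string, error)) {
    s, _ := open("/tmp/query.log")
    fmt.Println(s)
}

func main() {
    register(func(p string) (string, error) { return newSink(p) }) // redundant wrapper
    register(newSink)                                              // equivalent function value
}
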
@@ -722,10 +763,10 @@ func main() {

     if cfg.enableAutoGOMAXPROCS {
         l := func(format string, a ...interface{}) {
-            level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...))
+            logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
         }
         if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
-            level.Warn(logger).Log("component", "automaxprocs", "msg", "Failed to set GOMAXPROCS automatically", "err", err)
+            logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
         }
     }

@@ -739,17 +780,17 @@ func main() {
                 ),
             ),
         ); err != nil {
-            level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
+            logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
         }
     }

     if !agentMode {
         opts := promql.EngineOpts{
-            Logger:             log.With(logger, "component", "query engine"),
+            Logger:             logger.With("component", "query engine"),
             Reg:                prometheus.DefaultRegisterer,
             MaxSamples:         cfg.queryMaxSamples,
             Timeout:            time.Duration(cfg.queryTimeout),
-            ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")),
+            ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")),
             LookbackDelta:      time.Duration(cfg.lookbackDelta),
             NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
             // EnableAtModifier and EnableNegativeOffset have to be

@@ -770,7 +811,7 @@ func main() {
             Context:     ctxRule,
             ExternalURL: cfg.web.ExternalURL,
             Registerer:  prometheus.DefaultRegisterer,
-            Logger:      log.With(logger, "component", "rule manager"),
+            Logger:      logger.With("component", "rule manager"),
             OutageTolerance: time.Duration(cfg.outageTolerance),
             ForGracePeriod:  time.Duration(cfg.forGracePeriod),
             ResendDelay:     time.Duration(cfg.resendDelay),

@@ -821,7 +862,7 @@ func main() {
     }

     // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
-    webHandler := web.New(log.With(logger, "component", "web"), &cfg.web)
+    webHandler := web.New(logger.With("component", "web"), &cfg.web)

     // Monitor outgoing connections on default transport with conntrack.
     http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(

@@ -948,18 +989,18 @@ func main() {

     listeners, err := webHandler.Listeners()
     if err != nil {
-        level.Error(logger).Log("msg", "Unable to start web listeners", "err", err)
+        logger.Error("Unable to start web listener", "err", err)
         if err := queryEngine.Close(); err != nil {
-            level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+            logger.Warn("Closing query engine failed", "err", err)
         }
         os.Exit(1)
     }

     err = toolkit_web.Validate(*webConfig)
     if err != nil {
-        level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err)
+        logger.Error("Unable to validate web configuration file", "err", err)
         if err := queryEngine.Close(); err != nil {
-            level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+            logger.Warn("Closing query engine failed", "err", err)
         }
         os.Exit(1)
     }

@@ -975,15 +1016,15 @@ func main() {
         // Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
         select {
         case sig := <-term:
-            level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String())
+            logger.Warn("Received an OS signal, exiting gracefully...", "signal", sig.String())
             reloadReady.Close()
         case <-webHandler.Quit():
-            level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...")
+            logger.Warn("Received termination request via web service, exiting gracefully...")
         case <-cancel:
             reloadReady.Close()
         }
         if err := queryEngine.Close(); err != nil {
-            level.Warn(logger).Log("msg", "Closing query engine failed", "err", err)
+            logger.Warn("Closing query engine failed", "err", err)
         }
         return nil
     },

@@ -999,11 +1040,11 @@ func main() {
         g.Add(
             func() error {
                 err := discoveryManagerScrape.Run()
-                level.Info(logger).Log("msg", "Scrape discovery manager stopped")
+                logger.Info("Scrape discovery manager stopped")
                 return err
             },
             func(err error) {
-                level.Info(logger).Log("msg", "Stopping scrape discovery manager...")
+                logger.Info("Stopping scrape discovery manager...")
                 cancelScrape()
             },
         )

@@ -1013,11 +1054,11 @@ func main() {
         g.Add(
             func() error {
                 err := discoveryManagerNotify.Run()
-                level.Info(logger).Log("msg", "Notify discovery manager stopped")
+                logger.Info("Notify discovery manager stopped")
                 return err
             },
             func(err error) {
-                level.Info(logger).Log("msg", "Stopping notify discovery manager...")
+                logger.Info("Stopping notify discovery manager...")
                 cancelNotify()
             },
         )

@@ -1046,7 +1087,7 @@ func main() {
                 <-reloadReady.C

                 err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
-                level.Info(logger).Log("msg", "Scrape manager stopped")
+                logger.Info("Scrape manager stopped")
                 return err
             },
             func(err error) {

@@ -1054,7 +1095,7 @@ func main() {
                 // so that it doesn't try to write samples to a closed storage.
                 // We should also wait for rule manager to be fully stopped to ensure
                 // we don't trigger any false positive alerts for rules using absent().
-                level.Info(logger).Log("msg", "Stopping scrape manager...")
+                logger.Info("Stopping scrape manager...")
                 scrapeManager.Stop()
             },
         )

@@ -1085,7 +1126,7 @@ func main() {
         if cfg.enableAutoReload {
             checksum, err = config.GenerateChecksum(cfg.configFile)
             if err != nil {
-                level.Error(logger).Log("msg", "Failed to generate initial checksum for configuration file", "err", err)
+                logger.Error("Failed to generate initial checksum for configuration file", "err", err)
             }
         }

@@ -1105,17 +1146,17 @@ func main() {
                 select {
                 case <-hup:
                     if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
-                        level.Error(logger).Log("msg", "Error reloading config", "err", err)
+                        logger.Error("Error reloading config", "err", err)
                     } else if cfg.enableAutoReload {
                         if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
                             checksum = currentChecksum
                         } else {
-                            level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+                            logger.Error("Failed to generate checksum during configuration reload", "err", err)
                         }
                     }
                 case rc := <-webHandler.Reload():
                     if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
-                        level.Error(logger).Log("msg", "Error reloading config", "err", err)
+                        logger.Error("Error reloading config", "err", err)
                         rc <- err
                     } else {
                         rc <- nil

@@ -1123,7 +1164,7 @@ func main() {
                     if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
                         checksum = currentChecksum
                     } else {
-                        level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+                        logger.Error("Failed to generate checksum during configuration reload", "err", err)
                     }
                 }
             }

@@ -1133,14 +1174,14 @@ func main() {
                 }
                 currentChecksum, err := config.GenerateChecksum(cfg.configFile)
                 if err != nil {
-                    level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err)
+                    logger.Error("Failed to generate checksum during configuration reload", "err", err)
                 } else if currentChecksum == checksum {
                     continue
                 }
-                level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.")
+                logger.Info("Configuration file change detected, reloading the configuration.")

                 if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
-                    level.Error(logger).Log("msg", "Error reloading config", "err", err)
+                    logger.Error("Error reloading config", "err", err)
                 } else {
                     checksum = currentChecksum
                 }

@@ -1177,7 +1218,7 @@ func main() {

                 webHandler.SetReady(web.Ready)
                 notifs.DeleteNotification(notifications.StartingUp)
-                level.Info(logger).Log("msg", "Server is ready to receive web requests.")
+                logger.Info("Server is ready to receive web requests.")
                 <-cancel
                 return nil
             },

@@ -1192,7 +1233,7 @@ func main() {
         cancel := make(chan struct{})
         g.Add(
             func() error {
-                level.Info(logger).Log("msg", "Starting TSDB ...")
+                logger.Info("Starting TSDB ...")
                 if cfg.tsdb.WALSegmentSize != 0 {
                     if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 {
                         return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB")

@@ -1211,13 +1252,13 @@ func main() {

                 switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
                 case "NFS_SUPER_MAGIC":
-                    level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+                    logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType)
                 default:
-                    level.Info(logger).Log("fs_type", fsType)
+                    logger.Info("filesystem information", "fs_type", fsType)
                 }

-                level.Info(logger).Log("msg", "TSDB started")
-                level.Debug(logger).Log("msg", "TSDB options",
+                logger.Info("TSDB started")
+                logger.Debug("TSDB options",
                     "MinBlockDuration", cfg.tsdb.MinBlockDuration,
                     "MaxBlockDuration", cfg.tsdb.MaxBlockDuration,
                     "MaxBytes", cfg.tsdb.MaxBytes,

@@ -1236,7 +1277,7 @@ func main() {
             },
             func(err error) {
                 if err := fanoutStorage.Close(); err != nil {
-                    level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+                    logger.Error("Error stopping storage", "err", err)
                 }
                 close(cancel)
             },

@@ -1248,7 +1289,7 @@ func main() {
         cancel := make(chan struct{})
         g.Add(
             func() error {
-                level.Info(logger).Log("msg", "Starting WAL storage ...")
+                logger.Info("Starting WAL storage ...")
                 if cfg.agent.WALSegmentSize != 0 {
                     if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 {
                         return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB")

@@ -1267,13 +1308,13 @@ func main() {

                 switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
                 case "NFS_SUPER_MAGIC":
-                    level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+                    logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
                 default:
-                    level.Info(logger).Log("fs_type", fsType)
+                    logger.Info(fsType)
                 }

-                level.Info(logger).Log("msg", "Agent WAL storage started")
-                level.Debug(logger).Log("msg", "Agent WAL storage options",
+                logger.Info("Agent WAL storage started")
+                logger.Debug("Agent WAL storage options",
                     "WALSegmentSize", cfg.agent.WALSegmentSize,
                     "WALCompression", cfg.agent.WALCompression,
                     "StripeSize", cfg.agent.StripeSize,

@@ -1291,7 +1332,7 @@ func main() {
             },
             func(e error) {
                 if err := fanoutStorage.Close(); err != nil {
-                    level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+                    logger.Error("Error stopping storage", "err", err)
                 }
                 close(cancel)
             },

@@ -1325,7 +1366,7 @@ func main() {
                 <-reloadReady.C

                 notifierManager.Run(discoveryManagerNotify.SyncCh())
-                level.Info(logger).Log("msg", "Notifier manager stopped")
+                logger.Info("Notifier manager stopped")
                 return nil
             },
             func(err error) {

@@ -1334,16 +1375,16 @@ func main() {
         )
     }
     if err := g.Run(); err != nil {
-        level.Error(logger).Log("err", err)
+        logger.Error("Error running goroutines from run.Group", "err", err)
         os.Exit(1)
     }
-    level.Info(logger).Log("msg", "See you next time!")
+    logger.Info("See you next time!")
 }

-func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) {
+func openDBWithMetrics(dir string, logger *slog.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) {
     db, err := tsdb.Open(
         dir,
-        log.With(logger, "component", "tsdb"),
+        logger.With("component", "tsdb"),
         reg,
         opts,
         stats,

@@ -1396,10 +1437,10 @@ type reloader struct {
     reloader func(*config.Config) error
 }

-func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
     start := time.Now()
-    timings := []interface{}{}
-    level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
+    timingsLogger := logger
+    logger.Info("Loading configuration file", "filename", filename)

     defer func() {
         if err == nil {

@@ -1427,10 +1468,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
     for _, rl := range rls {
         rstart := time.Now()
         if err := rl.reloader(conf); err != nil {
-            level.Error(logger).Log("msg", "Failed to apply configuration", "err", err)
+            logger.Error("Failed to apply configuration", "err", err)
             failed = true
         }
-        timings = append(timings, rl.name, time.Since(rstart))
+        timingsLogger = timingsLogger.With((rl.name), time.Since(rstart))
     }
     if failed {
         return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)

@@ -1438,7 +1479,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b

     oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
     if oldGoGC != conf.Runtime.GoGC {
-        level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
+        logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
     }
     // Write the new setting out to the ENV var for runtime API output.
     if conf.Runtime.GoGC >= 0 {

@@ -1448,8 +1489,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
     }

     noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
-    l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)}
-    level.Info(logger).Log(append(l, timings...)...)
+    timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
     return nil
 }

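Rather than building a []interface{} of alternating names and durations and splatting it into a final go-kit Log call, the slog version threads the per-reloader timings through logger.With, which returns a derived logger carrying the accumulated attributes. A small sketch of the pattern (reloader names are hypothetical):

package main

import (
    "log/slog"
    "os"
    "time"
)

func main() {
    logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
    start := time.Now()

    timingsLogger := logger
    for _, name := range []string{"config", "scrape_sd", "notify"} {
        rstart := time.Now()
        // ... run the reloader for this subsystem ...
        // With returns a new *slog.Logger; reassigning accumulates one
        // name/duration attribute per reloader.
        timingsLogger = timingsLogger.With(name, time.Since(rstart))
    }

    timingsLogger.Info("Completed loading of configuration file", "totalDuration", time.Since(start))
}
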
cmd/prometheus/main_test.go

@@ -31,9 +31,9 @@ import (
     "time"

     "github.com/alecthomas/kingpin/v2"
-    "github.com/go-kit/log"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/common/model"
+    "github.com/prometheus/common/promslog"
     "github.com/stretchr/testify/require"

     "github.com/prometheus/prometheus/config"

@@ -295,7 +295,7 @@ func TestTimeMetrics(t *testing.T) {
     tmpDir := t.TempDir()

     reg := prometheus.NewRegistry()
-    db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil)
+    db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil)
     require.NoError(t, err)
     defer func() {
         require.NoError(t, db.Close())

cmd/prometheus/query_log_test.go

@@ -442,6 +442,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine {
     file, err := os.Open(path)
     require.NoError(t, err)
+    defer file.Close()

     scanner := bufio.NewScanner(file)
     for scanner.Scan() {
         var q queryLogLine

cmd/promtool/backfill.go

@@ -21,9 +21,10 @@ import (
     "math"
     "time"

-    "github.com/go-kit/log"
     "github.com/oklog/ulid"

+    "github.com/prometheus/common/promslog"
+
     "github.com/prometheus/prometheus/model/labels"
     "github.com/prometheus/prometheus/model/textparse"
     "github.com/prometheus/prometheus/tsdb"

@@ -120,7 +121,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
     // also need to append samples throughout the whole block range. To allow that, we
     // pretend that the block is twice as large here, but only really add sample in the
     // original interval later.
-    w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration)
+    w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration)
     if err != nil {
         return fmt.Errorf("block writer: %w", err)
     }

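tsdb.NewBlockWriter now takes a *slog.Logger in its first position, so the go-kit nop logger becomes promslog.NewNopLogger(). A hedged usage sketch (output directory and block duration are placeholders, error handling abbreviated):

package main

import (
    "log"
    "time"

    "github.com/prometheus/common/promslog"
    "github.com/prometheus/prometheus/tsdb"
)

func main() {
    // TSDB block ranges are int64 milliseconds; two hours is the default chunk range.
    blockDuration := (2 * time.Hour).Milliseconds()

    // promslog.NewNopLogger returns a *slog.Logger that discards everything,
    // filling the role the old go-kit nop logger played.
    w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), "/tmp/blocks", 2*blockDuration)
    if err != nil {
        log.Fatalf("block writer: %v", err)
    }
    defer w.Close()
    // w.Appender(ctx) can then add samples; w.Flush(ctx) cuts the block.
}
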
cmd/promtool/main.go

@@ -32,13 +32,13 @@ import (
     "time"

     "github.com/alecthomas/kingpin/v2"
-    "github.com/go-kit/log"
     "github.com/google/pprof/profile"
     "github.com/prometheus/client_golang/api"
     "github.com/prometheus/client_golang/prometheus"
     "github.com/prometheus/client_golang/prometheus/testutil/promlint"
     config_util "github.com/prometheus/common/config"
     "github.com/prometheus/common/model"
+    "github.com/prometheus/common/promslog"
     "github.com/prometheus/common/version"
     "github.com/prometheus/exporter-toolkit/web"
     "gopkg.in/yaml.v2"

@@ -575,7 +575,7 @@ func checkFileExists(fn string) error {
 func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
     fmt.Println("Checking", filename)

-    cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger())
+    cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger())
     if err != nil {
         return nil, err
     }

@@ -1182,7 +1182,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
         return fmt.Errorf("new api client error: %w", err)
     }

-    ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api)
+    ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api)
     errs := ruleImporter.loadGroups(ctx, files)
     for _, err := range errs {
         if err != nil {

cmd/promtool/main_test.go

@@ -146,7 +146,7 @@ func TestCheckSDFile(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             _, err := checkSDFile(test.file)
             if test.err != "" {
-                require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+                require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
                 return
             }
             require.NoError(t, err)

@@ -228,7 +228,7 @@ func TestCheckTargetConfig(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             _, err := checkConfig(false, "testdata/"+test.file, false)
             if test.err != "" {
-                require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+                require.EqualErrorf(t, err, test.err, "Expected error %q, got %q", test.err, err.Error())
                 return
             }
             require.NoError(t, err)

@@ -315,7 +315,7 @@ func TestCheckConfigSyntax(t *testing.T) {
                 expectedErrMsg = test.errWindows
             }
             if expectedErrMsg != "" {
-                require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+                require.EqualErrorf(t, err, expectedErrMsg, "Expected error %q, got %q", test.err, err.Error())
                 return
             }
             require.NoError(t, err)

@@ -345,7 +345,7 @@ func TestAuthorizationConfig(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             _, err := checkConfig(false, "testdata/"+test.file, false)
             if test.err != "" {
-                require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error())
+                require.ErrorContains(t, err, test.err, "Expected error to contain %q, got %q", test.err, err.Error())
                 return
             }
             require.NoError(t, err)

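These assertion rewrites line up with the testifylint settings enabled earlier (enable-all: true): comparing err.Error() strings with Equalf/Contains is replaced by the error-aware helpers, which also fail cleanly on a nil error instead of panicking on err.Error(). A short sketch of the preferred forms:

package main

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestErrorAssertions(t *testing.T) {
    err := errors.New("bad config")

    // Instead of require.Equalf(t, "bad config", err.Error(), ...):
    require.EqualErrorf(t, err, "bad config", "unexpected error: %v", err)

    // Instead of require.Contains(t, err.Error(), "config", ...):
    require.ErrorContains(t, err, "config")
}
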
cmd/promtool/rules.go

@@ -16,12 +16,12 @@ package main
 import (
     "context"
     "fmt"
+    "log/slog"
     "time"

-    "github.com/go-kit/log"
-    "github.com/go-kit/log/level"
     v1 "github.com/prometheus/client_golang/api/prometheus/v1"
     "github.com/prometheus/common/model"
+    "github.com/prometheus/common/promslog"

     "github.com/prometheus/prometheus/model/labels"
     "github.com/prometheus/prometheus/model/timestamp"

@@ -38,7 +38,7 @@ type queryRangeAPI interface {
 }

 type ruleImporter struct {
-    logger log.Logger
+    logger *slog.Logger
     config ruleImporterConfig

     apiClient queryRangeAPI

@@ -57,8 +57,8 @@ type ruleImporterConfig struct {

 // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series
 // written to disk in blocks.
-func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
-    level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
+func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
+    logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
     return &ruleImporter{
         logger: logger,
         config: config,

@@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string)
 // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks.
 func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
     for name, group := range importer.groups {
-        level.Info(importer.logger).Log("backfiller", "processing group", "name", name)
+        importer.logger.Info("processing group", "component", "backfiller", "name", name)

         for i, r := range group.Rules() {
-            level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name())
+            importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name())
             if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil {
                 errs = append(errs, err)
             }

@@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
         return fmt.Errorf("query range: %w", err)
     }
     if warnings != nil {
-        level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings)
+        importer.logger.Warn("Range query returned warnings.", "warnings", warnings)
     }

     // To prevent races with compaction, a block writer only allows appending samples

@@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
     // also need to append samples throughout the whole block range. To allow that, we
     // pretend that the block is twice as large here, but only really add sample in the
     // original interval later.
-    w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
+    w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
     if err != nil {
         return fmt.Errorf("new block writer: %w", err)
     }

cmd/promtool/rules_test.go

@@ -21,9 +21,9 @@ import (
     "testing"
     "time"

-    "github.com/go-kit/log"
     v1 "github.com/prometheus/client_golang/api/prometheus/v1"
     "github.com/prometheus/common/model"
+    "github.com/prometheus/common/promslog"
     "github.com/stretchr/testify/require"

     "github.com/prometheus/prometheus/model/labels"

@@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
 }

 func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
-    logger := log.NewNopLogger()
+    logger := promslog.NewNopLogger()
     cfg := ruleImporterConfig{
         outputDir: tmpDir,
         start:     start.Add(-10 * time.Hour),

@@ -20,9 +20,9 @@ import (
 	"os"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/google/go-cmp/cmp"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery"
@@ -39,7 +39,7 @@ type sdCheckResult struct {
 
 // CheckSD performs service discovery for the given job name and reports the results.
 func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
-	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+	logger := promslog.New(&promslog.Config{})
 
 	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
 	if err != nil {

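Editor's note: promslog.New(&promslog.Config{}) replaces the hand-built logfmt-to-stderr go-kit logger here; judging from this substitution, the zero-value Config reproduces that default (stderr, logfmt-style output). A small usage sketch, assuming only what the hunks above imply, namely that promslog.New returns a *slog.Logger:

package main

import "github.com/prometheus/common/promslog"

func main() {
	// Zero-value config: the drop-in replacement for
	// log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) used before.
	logger := promslog.New(&promslog.Config{})
	logger.Info("checking service discovery", "job", "node")
}
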
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -32,9 +33,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"go.uber.org/atomic"
 
+	"github.com/prometheus/common/promslog"
+
 	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
@@ -60,7 +62,7 @@ type writeBenchmark struct {
 	memprof   *os.File
 	blockprof *os.File
 	mtxprof   *os.File
-	logger    log.Logger
+	logger    *slog.Logger
 }
 
 func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error {
@@ -68,7 +70,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 		outPath:     outPath,
 		samplesFile: samplesFile,
 		numMetrics:  numMetrics,
-		logger:      log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)),
+		logger:      promslog.New(&promslog.Config{}),
 	}
 	if b.outPath == "" {
 		dir, err := os.MkdirTemp("", "tsdb_bench")
@@ -87,9 +89,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
 
 	dir := filepath.Join(b.outPath, "storage")
 
-	l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
-
-	st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
+	st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{
 		RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
 		MinBlockDuration:  int64(2 * time.Hour / time.Millisecond),
 	}, tsdb.NewDBStats())

@@ -26,13 +26,13 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/google/go-cmp/cmp"
 	"github.com/grafana/regexp"
 	"github.com/nsf/jsondiff"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/model/labels"
@@ -218,7 +218,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
 		Appendable: suite.Storage(),
 		Context:    context.Background(),
 		NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
-		Logger:     log.NewNopLogger(),
+		Logger:     promslog.NewNopLogger(),
 	}
 	m := rules.NewManager(opts)
 	groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)

@@ -16,6 +16,7 @@ package config
 import (
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/url"
 	"os"
 	"path/filepath"
@@ -25,8 +26,6 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -73,7 +72,7 @@ const (
 )
 
 // Load parses the YAML input s into a Config.
-func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
 	cfg := &Config{}
 	// If the entire config body is empty the UnmarshalYAML method is
 	// never called. We thus have to set the DefaultConfig at the entry
@@ -98,11 +97,11 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 		if v := os.Getenv(s); v != "" {
 			return v
 		}
-		level.Warn(logger).Log("msg", "Empty environment variable", "name", s)
+		logger.Warn("Empty environment variable", "name", s)
 		return ""
 	})
 	if newV != v.Value {
-		level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV)
+		logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV)
 	}
 	// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
 	b.Add(v.Name, newV)
@@ -112,7 +111,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
 }
 
 // LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
 	content, err := os.ReadFile(filename)
 	if err != nil {
 		return nil, err

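Editor's note: with the signature change above, every caller of config.Load and config.LoadFile must now hand in a *slog.Logger. A sketch of the updated call site, using the nop logger that the test hunks below also switch to:

package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// The nop logger silences the "Empty environment variable" and
	// "External label replaced" messages emitted during expansion.
	cfg, err := config.Load("global:\n", false, promslog.NewNopLogger())
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.GlobalConfig.ScrapeInterval)
}
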
@@ -24,10 +24,10 @@ import (
 	"time"
 
 	"github.com/alecthomas/units"
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -1501,7 +1501,7 @@ var expectedConf = &Config{
 }
 
 func TestYAMLRoundtrip(t *testing.T) {
-	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger())
+	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -1514,7 +1514,7 @@ func TestYAMLRoundtrip(t *testing.T) {
 }
 
 func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
-	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger())
+	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -1529,7 +1529,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
 
 func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 	t.Run("good config", func(t *testing.T) {
-		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger())
+		want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger())
 		require.NoError(t, err)
 
 		out, err := yaml.Marshal(want)
@@ -1541,7 +1541,7 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 	})
 
 	t.Run("bad config", func(t *testing.T) {
-		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger())
+		_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger())
 		require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
 		require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
 	})
@@ -1550,16 +1550,16 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
 func TestLoadConfig(t *testing.T) {
 	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
 	// an overwritten default field in the global config permanently changes the default.
-	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
-	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, expectedConf, c)
 }
 
 func TestScrapeIntervalLarger(t *testing.T) {
-	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Len(t, c.ScrapeConfigs, 1)
 	for _, sc := range c.ScrapeConfigs {
@@ -1569,7 +1569,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
 
 // YAML marshaling must not reveal authentication credentials.
 func TestElideSecrets(t *testing.T) {
-	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@@ -1586,31 +1586,31 @@ func TestElideSecrets(t *testing.T) {
 
 func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
 	// Parse a valid file that sets a rule files with an absolute path
-	c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger())
+	c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, ruleFilesExpectedConf, c)
 }
 
 func TestKubernetesEmptyAPIServer(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesWithKubeConfig(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesSelectors(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 }
 
@@ -2094,9 +2094,8 @@ func TestBadConfigs(t *testing.T) {
 		model.NameValidationScheme = model.UTF8Validation
 	}()
 	for _, ee := range expectedErrors {
-		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
-		require.Error(t, err, "%s", ee.filename)
-		require.Contains(t, err.Error(), ee.errMsg,
+		_, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger())
+		require.ErrorContains(t, err, ee.errMsg,
 			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
 	}
 }
@@ -2126,7 +2125,7 @@ func TestBadStaticConfigsYML(t *testing.T) {
 }
 
 func TestEmptyConfig(t *testing.T) {
-	c, err := Load("", false, log.NewNopLogger())
+	c, err := Load("", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
 	require.Equal(t, exp, *c)
@@ -2136,38 +2135,38 @@ func TestExpandExternalLabels(t *testing.T) {
 	// Cleanup ant TEST env variable that could exist on the system.
 	os.Setenv("TEST", "")
 
-	c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
 
-	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
 
 	os.Setenv("TEST", "TestValue")
-	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
 	require.NoError(t, err)
 	testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
 }
 
 func TestAgentMode(t *testing.T) {
-	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger())
+	_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
 
-	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger())
+	_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger())
 	require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
 
-	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger())
+	c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Empty(t, c.RemoteWriteConfigs)
 
-	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger())
+	c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	require.Len(t, c.RemoteWriteConfigs, 1)
 	require.Equal(
@@ -2178,7 +2177,7 @@ func TestAgentMode(t *testing.T) {
 }
 
 func TestEmptyGlobalBlock(t *testing.T) {
-	c, err := Load("global:\n", false, log.NewNopLogger())
+	c, err := Load("global:\n", false, promslog.NewNopLogger())
 	require.NoError(t, err)
 	exp := DefaultConfig
 	exp.Runtime = DefaultRuntimeConfig
@@ -2333,7 +2332,7 @@ func TestGetScrapeConfigs(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger())
+			c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger())
 			require.NoError(t, err)
 
 			scfgs, err := c.GetScrapeConfigs()
@@ -2351,7 +2350,7 @@ func kubernetesSDHostURL() config.URL {
 }
 
 func TestScrapeConfigDisableCompression(t *testing.T) {
-	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger())
+	want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -2398,7 +2397,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
 
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger())
+			want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger())
 			require.NoError(t, err)
 
 			out, err := yaml.Marshal(want)

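Editor's note: besides the logger swap, the test hunks above fold the require.Error plus require.Contains pair into a single require.ErrorContains, which also fails with a clear message when err is nil. A minimal sketch of the consolidated assertion:

package config_test

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorContainsSketch(t *testing.T) {
	err := errors.New(`duplicated promoted OTel resource attribute "k8s.job.name"`)
	// One assertion replaces the former require.Error/require.Contains pair.
	require.ErrorContains(t, err, "duplicated promoted OTel resource attribute")
}
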
@@ -233,7 +233,7 @@ type Config interface {
 }
 
 type DiscovererOptions struct {
-	Logger log.Logger
+	Logger *slog.Logger
 
 	// A registerer for the Discoverer's metrics.
 	Registerer prometheus.Registerer

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
@@ -29,11 +30,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -146,7 +146,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // the Discoverer interface.
 type EC2Discovery struct {
 	*refresh.Discovery
-	logger log.Logger
+	logger *slog.Logger
 	cfg    *EC2SDConfig
 	ec2    *ec2.EC2
 
@@ -157,14 +157,14 @@ type EC2Discovery struct {
 }
 
 // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
-func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
+func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
 	m, ok := metrics.(*ec2Metrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	d := &EC2Discovery{
 		logger: logger,
@@ -254,8 +254,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 	// Prometheus requires a reload if AWS adds a new AZ to the region.
 	if d.azToAZID == nil {
 		if err := d.refreshAZIDs(ctx); err != nil {
-			level.Debug(d.logger).Log(
-				"msg", "Unable to describe availability zones",
+			d.logger.Debug(
+				"Unable to describe availability zones",
 				"err", err)
 		}
 	}
@@ -296,8 +296,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
 		labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
 		azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
 		if !ok && d.azToAZID != nil {
-			level.Debug(d.logger).Log(
-				"msg", "Availability zone ID not found",
+			d.logger.Debug(
+				"Availability zone ID not found",
 				"az", *inst.Placement.AvailabilityZone)
 		}
 		labels[ec2LabelAZID] = model.LabelValue(azID)

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
@@ -29,10 +30,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/lightsail"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -130,14 +131,14 @@ type LightsailDiscovery struct {
 }
 
 // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
-func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
+func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
 	m, ok := metrics.(*lightsailMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	d := &LightsailDiscovery{

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"math/rand"
 	"net"
 	"net/http"
@@ -35,10 +36,9 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
 	cache "github.com/Code-Hex/go-generics-cache"
 	"github.com/Code-Hex/go-generics-cache/policy/lru"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	config_util "github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -175,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 type Discovery struct {
 	*refresh.Discovery
-	logger log.Logger
+	logger *slog.Logger
 	cfg    *SDConfig
 	port   int
 	cache  *cache.Cache[string, *armnetwork.Interface]
@@ -183,14 +183,14 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
-func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*azureMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
 	d := &Discovery{
@@ -228,13 +228,13 @@ type azureClient struct {
 	vm     *armcompute.VirtualMachinesClient
 	vmss   *armcompute.VirtualMachineScaleSetsClient
 	vmssvm *armcompute.VirtualMachineScaleSetVMsClient
-	logger log.Logger
+	logger *slog.Logger
 }
 
 var _ client = &azureClient{}
 
 // createAzureClient is a helper function for creating an Azure compute client to ARM.
-func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) {
+func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) {
 	cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
 	if err != nil {
 		return &azureClient{}, err
@@ -337,21 +337,21 @@ type virtualMachine struct {
 }
 
 // Create a new azureResource object from an ID string.
-func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) {
+func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) {
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	resourceID, err := arm.ParseResourceID(id)
 	if err != nil {
 		err := fmt.Errorf("invalid ID '%s': %w", id, err)
-		level.Error(logger).Log("err", err)
+		logger.Error("Failed to parse resource ID", "err", err)
 		return &arm.ResourceID{}, err
 	}
 	return resourceID, nil
}
 
 func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
-	defer level.Debug(d.logger).Log("msg", "Azure discovery completed")
+	defer d.logger.Debug("Azure discovery completed")
 
 	client, err := createAzureClient(*d.cfg, d.logger)
 	if err != nil {
@@ -365,7 +365,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 		return nil, fmt.Errorf("could not get virtual machines: %w", err)
 	}
 
-	level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines))
+	d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines))
 
 	// Load the vms managed by scale sets.
 	scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
@@ -459,7 +459,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 	}
 	if err != nil {
 		if errors.Is(err, errorNotFound) {
-			level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err)
+			d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
 		} else {
 			return nil, err
 		}
@@ -480,7 +480,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
 	// yet support this. On deallocated machines, this value happens to be nil so it
 	// is a cheap and easy way to determine if a machine is allocated or not.
 	if networkInterface.Properties.Primary == nil {
-		level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name)
+		d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name)
 		return nil, nil
 	}
 
@@ -724,7 +724,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) {
 	rs := time.Duration(random) * time.Second
 	exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
 	d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
-	level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds())
+	d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds())
 }
 
 // getFromCache will get the network Interface for the specified nicID

@@ -23,7 +23,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
 	cache "github.com/Code-Hex/go-generics-cache"
 	"github.com/Code-Hex/go-generics-cache/policy/lru"
-	"github.com/go-kit/log"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 )
@@ -150,7 +150,7 @@ func TestVMToLabelSet(t *testing.T) {
 	cfg := DefaultSDConfig
 	d := &Discovery{
 		cfg:    &cfg,
-		logger: log.NewNopLogger(),
+		logger: promslog.NewNopLogger(),
 		cache:  cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
 	}
 	network := armnetwork.Interface{

@@ -17,17 +17,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	consul "github.com/hashicorp/consul/api"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -113,8 +113,11 @@ type SDConfig struct {
 	Services []string `yaml:"services,omitempty"`
 	// A list of tags used to filter instances inside a service. Services must contain all tags in the list.
 	ServiceTags []string `yaml:"tags,omitempty"`
-	// Desired node metadata.
+	// Desired node metadata. As of Consul 1.14, consider `filter` instead.
 	NodeMeta map[string]string `yaml:"node_meta,omitempty"`
+	// Consul filter string
+	// See https://www.consul.io/api-docs/catalog#filtering-1, for syntax
+	Filter string `yaml:"filter,omitempty"`
 
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
 }
@@ -174,22 +177,23 @@ type Discovery struct {
 	watchedServices []string // Set of services which will be discovered.
 	watchedTags     []string // Tags used to filter instances of a service.
 	watchedNodeMeta map[string]string
+	watchedFilter   string
 	allowStale      bool
 	refreshInterval time.Duration
 	finalizer       func()
-	logger          log.Logger
+	logger          *slog.Logger
 	metrics         *consulMetrics
 }
 
 // NewDiscovery returns a new Discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*consulMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
@@ -218,6 +222,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
 		watchedServices: conf.Services,
 		watchedTags:     conf.ServiceTags,
 		watchedNodeMeta: conf.NodeMeta,
+		watchedFilter:   conf.Filter,
 		allowStale:      conf.AllowStale,
 		refreshInterval: time.Duration(conf.RefreshInterval),
 		clientDatacenter: conf.Datacenter,
@@ -282,7 +287,7 @@ func (d *Discovery) getDatacenter() error {
 
 	info, err := d.client.Agent().Self()
 	if err != nil {
-		level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
+		d.logger.Error("Error retrieving datacenter name", "err", err)
 		d.metrics.rpcFailuresCount.Inc()
 		return err
 	}
@@ -290,12 +295,12 @@ func (d *Discovery) getDatacenter() error {
 	dc, ok := info["Config"]["Datacenter"].(string)
 	if !ok {
 		err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
-		level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err)
+		d.logger.Error("Error retrieving datacenter name", "err", err)
 		return err
 	}
 
 	d.clientDatacenter = dc
-	d.logger = log.With(d.logger, "datacenter", dc)
+	d.logger = d.logger.With("datacenter", dc)
 	return nil
 }
 
@@ -361,13 +366,14 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 // entire list of services.
 func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) {
 	catalog := d.client.Catalog()
-	level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ","))
+	d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","), "filter", d.watchedFilter)
 
 	opts := &consul.QueryOptions{
 		WaitIndex:  *lastIndex,
 		WaitTime:   watchTimeout,
 		AllowStale: d.allowStale,
 		NodeMeta:   d.watchedNodeMeta,
+		Filter:     d.watchedFilter,
 	}
 	t0 := time.Now()
 	srvs, meta, err := catalog.Services(opts.WithContext(ctx))
@@ -382,7 +388,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
 	}
 
 	if err != nil {
-		level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err)
+		d.logger.Error("Error refreshing service list", "err", err)
 		d.metrics.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return
@@ -445,7 +451,7 @@ type consulService struct {
 	discovery          *Discovery
 	client             *consul.Client
 	tagSeparator       string
-	logger             log.Logger
+	logger             *slog.Logger
 	rpcFailuresCount   prometheus.Counter
 	serviceRPCDuration prometheus.Observer
 }
@@ -490,7 +496,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
 
 // Get updates for a service.
 func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) {
-	level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
+	srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
 
 	opts := &consul.QueryOptions{
 		WaitIndex: *lastIndex,
@@ -513,7 +519,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
 	}
 
 	if err != nil {
-		level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
+		srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
 		srv.rpcFailuresCount.Inc()
 		time.Sleep(retryInterval)
 		return

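Editor's note: the new `filter` option is handed through verbatim as consul.QueryOptions.Filter, so it accepts the expression syntax from the linked Consul docs. A sketch of the pass-through, mirroring the QueryOptions built in watchServices and the expression used by the new TestFilterOption below:

package main

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	// Prometheus copies SDConfig.Filter into this field unchanged; Consul
	// evaluates the expression server-side when listing catalog services.
	opts := &consul.QueryOptions{
		Filter: `NodeMeta.rack_name == "2304"`,
	}
	fmt.Println("catalog query filter:", opts.Filter)
}
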
@@ -21,10 +21,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 	"gopkg.in/yaml.v2"
@@ -252,6 +252,8 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
 		case "/v1/catalog/services?index=1&wait=120000ms":
 			time.Sleep(5 * time.Second)
 			response = ServicesTestAnswer
+		case "/v1/catalog/services?filter=NodeMeta.rack_name+%3D%3D+%222304%22&index=1&wait=120000ms":
+			response = ServicesTestAnswer
 		default:
 			t.Errorf("Unhandled consul call: %s", r.URL)
 		}
@@ -270,7 +272,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
 }
 
 func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
-	logger := log.NewNopLogger()
+	logger := promslog.NewNopLogger()
 
 	metrics := NewTestMetrics(t, config, prometheus.NewRegistry())
 
@@ -369,6 +371,27 @@ func TestAllOptions(t *testing.T) {
 	<-ch
 }
 
+// Watch the test service with a specific tag and node-meta via Filter parameter.
+func TestFilterOption(t *testing.T) {
+	stub, config := newServer(t)
+	defer stub.Close()
+
+	config.Services = []string{"test"}
+	config.Filter = `NodeMeta.rack_name == "2304"`
+	config.Token = "fake-token"
+
+	d := newDiscovery(t, config)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	ch := make(chan []*targetgroup.Group)
+	go func() {
+		d.Run(ctx, ch)
+		close(ch)
+	}()
+	checkOneTarget(t, <-ch)
+	cancel()
+}
+
 func TestGetDatacenterShouldReturnError(t *testing.T) {
 	for _, tc := range []struct {
 		handler func(http.ResponseWriter, *http.Request)
@@ -407,7 +430,7 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
 		err = d.getDatacenter()
 
 		// An error should be returned.
-		require.Equal(t, tc.errMessage, err.Error())
+		require.EqualError(t, err, tc.errMessage)
 		// Should still be empty.
 		require.Equal(t, "", d.clientDatacenter)
 	}

@@ -16,6 +16,7 @@ package digitalocean
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
@@ -23,7 +24,6 @@ import (
 	"time"
 
 	"github.com/digitalocean/godo"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -111,7 +111,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*digitaloceanMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -19,9 +19,9 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
 	require.NoError(t, err)

@@ -15,9 +15,9 @@ package discovery
 
 import (
 	"context"
+	"log/slog"
 	"reflect"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 
@@ -47,7 +47,7 @@ type DiscovererMetrics interface {
 
 // DiscovererOptions provides options for a Discoverer.
 type DiscovererOptions struct {
-	Logger log.Logger
+	Logger *slog.Logger
 
 	Metrics DiscovererMetrics
 

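Editor's note: with DiscovererOptions carrying a *slog.Logger, callers can hand each Discoverer a pre-scoped child logger; the consul hunk above uses the same idiom when it rebinds d.logger with a datacenter attribute. A standard-library sketch:

package main

import (
	"log/slog"
	"os"
)

func main() {
	root := slog.New(slog.NewTextHandler(os.Stderr, nil))
	// Equivalent of d.logger = d.logger.With("datacenter", dc): every
	// later record from this child carries the attribute automatically.
	dnsLogger := root.With("discovery", "dns")
	dnsLogger.Info("starting provider")
}
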
@@ -17,17 +17,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/miekg/dns"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
@@ -111,21 +111,21 @@ type Discovery struct {
 	names   []string
 	port    int
 	qtype   uint16
-	logger  log.Logger
+	logger  *slog.Logger
 	metrics *dnsMetrics
 
-	lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
+	lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*dnsMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	qtype := dns.TypeSRV
@@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	for _, name := range d.names {
 		go func(n string) {
 			if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) {
-				level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err)
+				d.logger.Error("Error refreshing DNS targets", "err", err)
 			}
 			wg.Done()
 		}(name)
@@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 			// CNAME responses can occur with "Type: A" dns_sd_config requests.
 			continue
 		default:
-			level.Warn(d.logger).Log("msg", "Invalid record", "record", record)
+			d.logger.Warn("Invalid record", "record", record)
 			continue
 		}
 		tg.Targets = append(tg.Targets, model.LabelSet{
@@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
 // error will be generic-looking, because trying to return all the errors
 // returned by the combination of all name permutations and servers is a
 // nightmare.
-func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 	conf, err := dns.ClientConfigFromFile(resolvConf)
 	if err != nil {
 		return nil, fmt.Errorf("could not load resolv.conf: %w", err)
@@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
 // A non-viable answer is "anything else", which encompasses both various
 // system-level problems (like network timeouts) and also
 // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc).
-func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) {
+func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) {
 	client := &dns.Client{}
 
 	for _, server := range conf.Servers {
 		servAddr := net.JoinHostPort(server, conf.Port)
 		msg, err := askServerForName(name, qtype, client, servAddr, true)
 		if err != nil {
-			level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err)
+			logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err)
 			continue
 		}
 

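Editor's note: the lookupFn field keeps DNS resolution behind a function type so tests can inject canned answers; the signature change above only swaps the logger parameter. A sketch of the injection pattern (the resolver type and the stub are illustrative names, not from the commit):

package main

import (
	"fmt"
	"log/slog"
	"net"

	"github.com/miekg/dns"
)

type resolver struct {
	// Same shape as Discovery.lookupFn after this commit.
	lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
}

func main() {
	r := resolver{
		// Stub standing in for lookupWithSearchPath, as dns_test.go does.
		lookupFn: func(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
			return &dns.Msg{Answer: []dns.RR{&dns.A{A: net.IPv4(192, 0, 2, 2)}}}, nil
		},
	}
	msg, _ := r.lookupFn("web.example.com.", dns.TypeA, slog.Default())
	fmt.Println("answers:", len(msg.Answer))
}
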
@@ -16,11 +16,11 @@ package dns
 
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/miekg/dns"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
@@ -40,7 +40,7 @@ func TestDNS(t *testing.T) {
 	testCases := []struct {
 		name   string
 		config SDConfig
-		lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error)
+		lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
 
 		expected []*targetgroup.Group
 	}{
@@ -52,7 +52,7 @@ func TestDNS(t *testing.T) {
 				Port:            80,
 				Type:            "A",
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return nil, fmt.Errorf("some error")
 			},
 			expected: []*targetgroup.Group{},
@@ -65,7 +65,7 @@ func TestDNS(t *testing.T) {
 				Port:            80,
 				Type:            "A",
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.A{A: net.IPv4(192, 0, 2, 2)},
@@ -97,7 +97,7 @@ func TestDNS(t *testing.T) {
 				Port:            80,
 				Type:            "AAAA",
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.AAAA{AAAA: net.IPv6loopback},
@@ -128,7 +128,7 @@ func TestDNS(t *testing.T) {
 				Type:            "SRV",
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.SRV{Port: 3306, Target: "db1.example.com."},
@@ -167,7 +167,7 @@ func TestDNS(t *testing.T) {
 				Names:           []string{"_mysql._tcp.db.example.com."},
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.SRV{Port: 3306, Target: "db1.example.com."},
@@ -198,7 +198,7 @@ func TestDNS(t *testing.T) {
 				Names:           []string{"_mysql._tcp.db.example.com."},
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{}, nil
 			},
 			expected: []*targetgroup.Group{
@@ -215,7 +215,7 @@ func TestDNS(t *testing.T) {
 				Port:            25,
 				RefreshInterval: model.Duration(time.Minute),
 			},
-			lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) {
+			lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
 				return &dns.Msg{
 					Answer: []dns.RR{
 						&dns.MX{Preference: 0, Mx: "smtp1.example.com."},

@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
 	"strconv"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -126,7 +126,7 @@ type Discovery struct {
 }
 
 // NewDiscovery creates a new Eureka discovery for the given role.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*eurekaMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -19,6 +19,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"strings"
@@ -26,12 +27,11 @@ import (
 	"time"
 
 	"github.com/fsnotify/fsnotify"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -175,20 +175,20 @@ type Discovery struct {
 	// and how many target groups they contained.
 	// This is used to detect deleted target groups.
 	lastRefresh map[string]int
-	logger      log.Logger
+	logger      *slog.Logger
 
 	metrics *fileMetrics
 }
 
 // NewDiscovery returns a new file discovery for the given paths.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	fm, ok := metrics.(*fileMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	disc := &Discovery{
@@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string {
 	for _, p := range d.paths {
 		files, err := filepath.Glob(p)
 		if err != nil {
-			level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err)
+			d.logger.Error("Error expanding glob", "glob", p, "err", err)
 			continue
 		}
 		paths = append(paths, files...)
@@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() {
 			p = "./"
 		}
 		if err := d.watcher.Add(p); err != nil {
-			level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err)
+			d.logger.Error("Error adding file watch", "path", p, "err", err)
 		}
 	}
 }
@@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() {
 func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
-		level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err)
+		d.logger.Error("Error adding file watcher", "err", err)
 		d.metrics.fileWatcherErrorsCount.Inc()
 		return
 	}
@@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 		case err := <-d.watcher.Errors:
 			if err != nil {
-				level.Error(d.logger).Log("msg", "Error watching file", "err", err)
+				d.logger.Error("Error watching file", "err", err)
 			}
 		}
 	}
@@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) {
 
 // stop shuts down the file watcher.
 func (d *Discovery) stop() {
-	level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
+	d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
 
 	done := make(chan struct{})
 	defer close(done)
@@ -320,10 +320,10 @@ func (d *Discovery) stop() {
 		}
 	}()
 	if err := d.watcher.Close(); err != nil {
-		level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
+		d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
 	}
 
-	level.Debug(d.logger).Log("msg", "File discovery stopped")
+	d.logger.Debug("File discovery stopped")
 }
 
 // refresh reads all files matching the discovery's patterns and sends the respective
@@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
 		if err != nil {
 			d.metrics.fileSDReadErrorsCount.Inc()
 
-			level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
+			d.logger.Error("Error reading file", "path", p, "err", err)
 			// Prevent deletion down below.
 			ref[p] = d.lastRefresh[p]
 			continue
@@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
 	for f, n := range d.lastRefresh {
 		m, ok := ref[f]
 		if !ok || n > m {
-			level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f)
+			d.logger.Debug("file_sd refresh found file that should be removed", "file", f)
 			d.deleteTimestamp(f)
 			for i := m; i < n; i++ {
 				select {

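Editor's note: NewDiscovery above repeats the nil-defaulting idiom used across every SD constructor in this commit; it keeps later d.logger calls safe without forcing callers to supply a logger. Isolated sketch (orDefault is an illustrative name):

package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// orDefault mirrors the constructors' guard: a nil *slog.Logger is replaced
// with a nop logger so method calls on it never panic.
func orDefault(logger *slog.Logger) *slog.Logger {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	return logger
}

func main() {
	logger := orDefault(nil)
	logger.Error("Error adding file watcher", "err", "example") // discarded by the nop handler
}
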
@@ -17,12 +17,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"golang.org/x/oauth2/google"
@@ -129,7 +129,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*gceMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -15,12 +15,12 @@ package hetzner
 
 import (
 	"context"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/hetznercloud/hcloud-go/v2/hcloud"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -58,7 +58,7 @@ type hcloudDiscovery struct {
 }
 
 // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
-func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) {
+func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) {
 	d := &hcloudDiscovery{
 		port: conf.Port,
 	}

@@ -18,8 +18,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 )
 
@@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) {
 	cfg.HTTPClientConfig.BearerToken = hcloudTestToken
 	cfg.hcloudEndpoint = suite.Mock.Endpoint()
 
-	d, err := newHcloudDiscovery(&cfg, log.NewNopLogger())
+	d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	targetGroups, err := d.refresh(context.Background())

@@ -17,9 +17,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/hetznercloud/hcloud-go/v2/hcloud"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -135,7 +135,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*hetznerMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -157,7 +157,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
 	), nil
 }
 
-func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
+func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
 	switch conf.Role {
 	case HetznerRoleHcloud:
 		if conf.hcloudEndpoint == "" {

@@ -18,13 +18,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
@@ -51,7 +51,7 @@ type robotDiscovery struct {
 }
 
 // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
-func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) {
+func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) {
 	d := &robotDiscovery{
 		port:     conf.Port,
 		endpoint: conf.robotEndpoint,
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 )
 
@@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) {
 	cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword}
 	cfg.robotEndpoint = suite.Mock.Endpoint()
 
-	d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
+	d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	targetGroups, err := d.refresh(context.Background())
@@ -91,12 +91,11 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
 	cfg := DefaultSDConfig
 	cfg.robotEndpoint = suite.Mock.Endpoint()
 
-	d, err := newRobotDiscovery(&cfg, log.NewNopLogger())
+	d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 
 	targetGroups, err := d.refresh(context.Background())
-	require.Error(t, err)
-	require.Equal(t, "non 2xx status '401' response during hetzner service discovery with role robot", err.Error())
+	require.EqualError(t, err, "non 2xx status '401' response during hetzner service discovery with role robot")
 
 	require.Empty(t, targetGroups)
 }
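Besides the logger swap, the last hunk above folds a two-step assertion into one testify call. A small self-contained sketch of the equivalence (the test name and message here are illustrative only, not from the patch):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEqualErrorFold(t *testing.T) {
	err := errors.New("non 2xx status")
	// Before: two assertions.
	require.Error(t, err)
	require.Equal(t, "non 2xx status", err.Error())
	// After: one assertion that checks err is non-nil and that
	// err.Error() matches exactly, and fails cleanly when err is nil.
	require.EqualError(t, err, "non 2xx status")
}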
@@ -19,17 +19,18 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -114,14 +115,14 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new HTTP discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*httpMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 
 	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...)
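The constructor above keeps its nil-logger guard and only swaps the no-op implementation. A sketch of the guard in isolation, assuming promslog.NewNopLogger returns a *slog.Logger whose handler discards every record (its name and its use in the hunks above suggest exactly that):

package example

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// ensureLogger mirrors the guard in NewDiscovery: discoverers accept a nil
// *slog.Logger and substitute a no-op logger once, rather than checking for
// nil at every call site.
func ensureLogger(logger *slog.Logger) *slog.Logger {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	return logger
}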
@@ -21,11 +21,11 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) {
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
 	require.NoError(t, err)
 	for _, test := range cases {
 		ctx := context.Background()
@@ -16,9 +16,9 @@ package ionos
 import (
 	"errors"
 	"fmt"
+	"log/slog"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -43,7 +43,7 @@ func init() {
 type Discovery struct{}
 
 // NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*ionosMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -16,13 +16,13 @@ package ionos
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	ionoscloud "github.com/ionos-cloud/sdk-go/v6"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -60,7 +60,7 @@ type serverDiscovery struct {
 	datacenterID string
 }
 
-func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) {
+func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) {
 	d := &serverDiscovery{
 		port:         conf.Port,
 		datacenterID: conf.DatacenterID,
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -33,7 +33,7 @@ import (
 
 // Endpoints discovers new endpoint targets.
 type Endpoints struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	endpointsInf cache.SharedIndexInformer
 	serviceInf   cache.SharedInformer
@@ -49,9 +49,9 @@ type Endpoints struct {
 }
 
 // NewEndpoints returns a new endpoints discovery.
-func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
+func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd)
@@ -92,13 +92,13 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err)
+		l.Error("Error adding endpoints event handler.", "err", err)
 	}
 
 	serviceUpdate := func(o interface{}) {
 		svc, err := convertToService(o)
 		if err != nil {
-			level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
+			e.logger.Error("converting to Service object failed", "err", err)
 			return
 		}
 
@@ -111,7 +111,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		}
 
 		if err != nil {
-			level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err)
+			e.logger.Error("retrieving endpoints failed", "err", err)
 		}
 	}
 	_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -131,7 +131,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+		l.Error("Error adding services event handler.", "err", err)
 	}
 	_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
 		UpdateFunc: func(old, cur interface{}) {
@@ -154,7 +154,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+		l.Error("Error adding pods event handler.", "err", err)
 	}
 	if e.withNodeMetadata {
 		_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -172,7 +172,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 			},
 		})
 		if err != nil {
-			level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+			l.Error("Error adding nodes event handler.", "err", err)
 		}
 	}
 
@@ -182,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
 func (e *Endpoints) enqueueNode(nodeName string) {
 	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+		e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
 		return
 	}
 
@@ -194,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) {
 func (e *Endpoints) enqueuePod(podNamespacedName string) {
 	endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
+		e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
 		return
 	}
 
@@ -223,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache")
+			e.logger.Error("endpoints informer unable to sync cache")
 		}
 		return
 	}
@@ -247,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
+		e.logger.Error("splitting key failed", "key", key)
 		return true
 	}
 
 	o, exists, err := e.endpointsStore.GetByKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
+		e.logger.Error("getting object from store failed", "key", key)
 		return true
 	}
 	if !exists {
@@ -262,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
 	}
 	eps, err := convertToEndpoints(o)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err)
+		e.logger.Error("converting to Endpoints object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, e.buildEndpoints(eps))
@@ -400,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
 
 	v := eps.Labels[apiv1.EndpointsOverCapacity]
 	if v == "truncated" {
-		level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+		e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
 	}
 	if v == "warning" {
-		level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
+		e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
 	}
 
 	// For all seen pods, check all container ports. If they were not covered
@@ -460,7 +460,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
 
 	obj, exists, err := e.podStore.Get(p)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
+		e.logger.Error("resolving pod ref failed", "err", err)
 		return nil
 	}
 	if !exists {
@@ -476,7 +476,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
 
 	obj, exists, err := e.serviceStore.Get(svc)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
+		e.logger.Error("retrieving service failed", "err", err)
 		return
 	}
 	if !exists {
@@ -487,14 +487,14 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
 	tg.Labels = tg.Labels.Merge(serviceLabels(svc))
 }
 
-func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet {
+func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet {
 	if nodeName == nil {
 		return tg
 	}
 
 	obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName)
 	if err != nil {
-		level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err)
+		logger.Error("Error getting node", "node", *nodeName, "err", err)
 		return tg
 	}
 
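One consequence of dropping go-kit/log/level: the minimum level no longer comes from wrapping the logger with level.NewFilter, it lives in the slog handler. A sketch, assuming promslog keeps promlog's Config/AllowedLevel API (the Config type and AllowedLevel.Set method here are assumptions, not shown in this patch):

package example

import "github.com/prometheus/common/promslog"

// Sketch: the minimum level is set on the handler at construction time,
// so a Debug call below the threshold is dropped inside the handler.
func newDebugLogger() {
	lvl := &promslog.AllowedLevel{}
	if err := lvl.Set("debug"); err != nil {
		panic(err)
	}
	logger := promslog.New(&promslog.Config{Level: lvl})
	logger.Debug("visible because the handler allows debug", "component", "endpoints")
}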
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/discovery/v1"
 	"k8s.io/client-go/tools/cache"
@@ -35,7 +35,7 @@ import (
 
 // EndpointSlice discovers new endpoint targets.
 type EndpointSlice struct {
-	logger log.Logger
+	logger *slog.Logger
 
 	endpointSliceInf cache.SharedIndexInformer
 	serviceInf       cache.SharedInformer
@@ -51,9 +51,9 @@ type EndpointSlice struct {
 }
 
 // NewEndpointSlice returns a new endpointslice discovery.
-func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
+func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd)
@@ -92,13 +92,13 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err)
+		l.Error("Error adding endpoint slices event handler.", "err", err)
 	}
 
 	serviceUpdate := func(o interface{}) {
 		svc, err := convertToService(o)
 		if err != nil {
-			level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err)
+			e.logger.Error("converting to Service object failed", "err", err)
 			return
 		}
 
@@ -108,7 +108,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		for _, obj := range e.endpointSliceStore.List() {
 			esa, err := e.getEndpointSliceAdaptor(obj)
 			if err != nil {
-				level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
+				e.logger.Error("converting to EndpointSlice object failed", "err", err)
 				continue
 			}
 			if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name {
@@ -131,7 +131,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+		l.Error("Error adding services event handler.", "err", err)
 	}
 
 	if e.withNodeMetadata {
@@ -150,7 +150,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 			},
 		})
 		if err != nil {
-			level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+			l.Error("Error adding nodes event handler.", "err", err)
 		}
 	}
 
@@ -160,7 +160,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
 func (e *EndpointSlice) enqueueNode(nodeName string) {
 	endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err)
+		e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
 		return
 	}
 
@@ -188,7 +188,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
 	}
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache")
+			e.logger.Error("endpointslice informer unable to sync cache")
 		}
 		return
 	}
@@ -212,13 +212,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "splitting key failed", "key", key)
+		e.logger.Error("splitting key failed", "key", key)
 		return true
 	}
 
 	o, exists, err := e.endpointSliceStore.GetByKey(key)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "getting object from store failed", "key", key)
+		e.logger.Error("getting object from store failed", "key", key)
 		return true
 	}
 	if !exists {
@@ -228,7 +228,7 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr
 
 	esa, err := e.getEndpointSliceAdaptor(o)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err)
+		e.logger.Error("converting to EndpointSlice object failed", "err", err)
 		return true
 	}
 
@@ -470,7 +470,7 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
 
 	obj, exists, err := e.podStore.Get(p)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err)
+		e.logger.Error("resolving pod ref failed", "err", err)
 		return nil
 	}
 	if !exists {
@@ -495,7 +495,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
 
 	obj, exists, err := e.serviceStore.Get(svc)
 	if err != nil {
-		level.Error(e.logger).Log("msg", "retrieving service failed", "err", err)
+		e.logger.Error("retrieving service failed", "err", err)
 		return
 	}
 	if !exists {
@@ -17,10 +17,9 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"strings"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	v1 "k8s.io/api/networking/v1"
@@ -32,14 +31,14 @@ import (
 
 // Ingress implements discovery of Kubernetes ingress.
 type Ingress struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
 	queue    *workqueue.Type
 }
 
 // NewIngress returns a new ingress discovery.
-func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress {
+func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress {
 	ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd)
 	ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate)
 	ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete)
@@ -66,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err)
+		l.Error("Error adding ingresses event handler.", "err", err)
 	}
 	return s
 }
@@ -86,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(i.logger).Log("msg", "ingress informer unable to sync cache")
+			i.logger.Error("ingress informer unable to sync cache")
 		}
 		return
 	}
@@ -127,7 +126,7 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
 	case *v1.Ingress:
 		ia = newIngressAdaptorFromV1(ingress)
 	default:
-		level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err",
+		i.logger.Error("converting to Ingress object failed", "err",
 			fmt.Errorf("received unexpected object: %v", o))
 		return true
 	}
@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"os"
 	"reflect"
 	"strings"
@@ -25,11 +26,10 @@ import (
 
 	"github.com/prometheus/prometheus/util/strutil"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"
 	apiv1 "k8s.io/api/core/v1"
 	disv1 "k8s.io/api/discovery/v1"
@@ -260,7 +260,7 @@ type Discovery struct {
 	sync.RWMutex
 	client             kubernetes.Interface
 	role               Role
-	logger             log.Logger
+	logger             *slog.Logger
 	namespaceDiscovery *NamespaceDiscovery
 	discoverers        []discovery.Discoverer
 	selectors          roleSelector
@@ -285,14 +285,14 @@ func (d *Discovery) getNamespaces() []string {
 }
 
 // New creates a new Kubernetes discovery for the given role.
-func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
+func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
 	m, ok := metrics.(*kubernetesMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}
 
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 	var (
 		kcfg *rest.Config
@@ -324,7 +324,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di
 			ownNamespace = string(ownNamespaceContents)
 		}
 
-		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
+		l.Info("Using pod service account via in-cluster config")
 	default:
 		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
 		if err != nil {
@@ -446,7 +446,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				go nodeInf.Run(ctx.Done())
 			}
 			eps := NewEndpointSlice(
-				log.With(d.logger, "role", "endpointslice"),
+				d.logger.With("role", "endpointslice"),
 				informer,
 				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
@@ -506,7 +506,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 
 			eps := NewEndpoints(
-				log.With(d.logger, "role", "endpoint"),
+				d.logger.With("role", "endpoint"),
 				d.newEndpointsByNodeInformer(elw),
 				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
@@ -540,7 +540,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				},
 			}
 			pod := NewPod(
-				log.With(d.logger, "role", "pod"),
+				d.logger.With("role", "pod"),
 				d.newPodsByNodeInformer(plw),
 				nodeInformer,
 				d.metrics.eventCount,
@@ -564,7 +564,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 				},
 			}
 			svc := NewService(
-				log.With(d.logger, "role", "service"),
+				d.logger.With("role", "service"),
 				d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
 				d.metrics.eventCount,
 			)
@@ -589,7 +589,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
 			ingress := NewIngress(
-				log.With(d.logger, "role", "ingress"),
+				d.logger.With("role", "ingress"),
 				informer,
 				d.metrics.eventCount,
 			)
@@ -598,11 +598,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		}
 	case RoleNode:
 		nodeInformer := d.newNodeInformer(ctx)
-		node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount)
+		node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount)
 		d.discoverers = append(d.discoverers, node)
 		go node.informer.Run(ctx.Done())
 	default:
-		level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role)
+		d.logger.Error("unknown Kubernetes discovery kind", "role", d.role)
 	}
 
 	var wg sync.WaitGroup
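Run wires one child logger per role with logger.With, replacing go-kit's log.With helper. A standard-library sketch of that fan-out (the watcher type is illustrative, not from the patch):

package main

import (
	"log/slog"
	"os"
)

type watcher struct{ logger *slog.Logger }

func main() {
	parent := slog.New(slog.NewJSONHandler(os.Stdout, nil))
	// Each sub-discoverer gets a child logger; every record it emits then
	// carries its "role" attribute automatically.
	pods := watcher{logger: parent.With("role", "pod")}
	services := watcher{logger: parent.With("role", "service")}
	pods.logger.Info("watch started")
	services.logger.Info("watch started")
}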
@@ -20,8 +20,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -71,7 +71,7 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer
 
 	d := &Discovery{
 		client:             clientset,
-		logger:             log.NewNopLogger(),
+		logger:             promslog.NewNopLogger(),
 		role:               role,
 		namespaceDiscovery: &nsDiscovery,
 		ownNamespace:       "own-ns",
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -38,16 +38,16 @@ const (
 
 // Node discovers Kubernetes nodes.
 type Node struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
 	queue    *workqueue.Type
 }
 
 // NewNode returns a new node discovery.
-func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node {
+func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd)
@@ -76,7 +76,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err)
+		l.Error("Error adding nodes event handler.", "err", err)
 	}
 	return n
 }
@@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(n.logger).Log("msg", "node informer unable to sync cache")
+			n.logger.Error("node informer unable to sync cache")
 		}
 		return
 	}
@@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
 	}
 	node, err := convertToNode(o)
 	if err != nil {
-		level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err)
+		n.logger.Error("converting to Node object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, n.buildNode(node))
@@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
 
 	addr, addrMap, err := nodeAddress(node)
 	if err != nil {
-		level.Warn(n.logger).Log("msg", "No node address found", "err", err)
+		n.logger.Warn("No node address found", "err", err)
 		return nil
 	}
 	addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10))
@@ -17,14 +17,14 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/cache"
@@ -44,14 +44,14 @@ type Pod struct {
 	nodeInf          cache.SharedInformer
 	withNodeMetadata bool
 	store            cache.Store
-	logger           log.Logger
+	logger           *slog.Logger
 	queue            *workqueue.Type
 }
 
 // NewPod creates a new pod discovery.
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod {
+func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd)
@@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+		l.Error("Error adding pods event handler.", "err", err)
 	}
 
 	if p.withNodeMetadata {
@@ -100,7 +100,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
 			},
 		})
 		if err != nil {
-			level.Error(l).Log("msg", "Error adding pods event handler.", "err", err)
+			l.Error("Error adding pods event handler.", "err", err)
 		}
 	}
 
@@ -127,7 +127,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(p.logger).Log("msg", "pod informer unable to sync cache")
+			p.logger.Error("pod informer unable to sync cache")
 		}
 		return
 	}
@@ -164,7 +164,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
 	}
 	pod, err := convertToPod(o)
 	if err != nil {
-		level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err)
+		p.logger.Error("converting to Pod object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, p.buildPod(pod))
@@ -246,7 +246,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe
 func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string {
 	cStatus, err := p.findPodContainerStatus(statuses, containerName)
 	if err != nil {
-		level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err)
+		p.logger.Debug("cannot find container ID", "err", err)
 		return ""
 	}
 	return cStatus.ContainerID
@@ -315,7 +315,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 func (p *Pod) enqueuePodsForNode(nodeName string) {
 	pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName)
 	if err != nil {
-		level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err)
+		p.logger.Error("Error getting pods for node", "node", nodeName, "err", err)
 		return
 	}
 
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
@@ -33,16 +33,16 @@ import (
 
 // Service implements discovery of Kubernetes services.
 type Service struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	informer cache.SharedInformer
 	store    cache.Store
 	queue    *workqueue.Type
 }
 
 // NewService returns a new service discovery.
-func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service {
+func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service {
 	if l == nil {
-		l = log.NewNopLogger()
+		l = promslog.NewNopLogger()
 	}
 
 	svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd)
@@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C
 		},
 	})
 	if err != nil {
-		level.Error(l).Log("msg", "Error adding services event handler.", "err", err)
+		l.Error("Error adding services event handler.", "err", err)
 	}
 	return s
 }
@@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 
 	if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(s.logger).Log("msg", "service informer unable to sync cache")
+			s.logger.Error("service informer unable to sync cache")
 		}
 		return
 	}
@@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
 	}
 	eps, err := convertToService(o)
 	if err != nil {
-		level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err)
+		s.logger.Error("converting to Service object failed", "err", err)
 		return true
 	}
 	send(ctx, ch, s.buildService(eps))
@@ -17,13 +17,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/linode/linodego"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -138,7 +138,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*linodeMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,10 +19,10 @@ import (
 	"net/url"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery"
@@ -238,7 +238,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Endpoint())
 	require.NoError(t, err)
@@ -16,14 +16,14 @@ package discovery
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"reflect"
 	"sync"
 	"time"
 
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
@@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere
 }
 
 // NewManager is the Discovery Manager constructor.
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
+func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}
 	mgr := &Manager{
 		logger: logger,
@@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
 	if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
 		mgr.metrics = metrics
 	} else {
-		level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
+		logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
 		return nil
 	}
 
@@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
 // Manager maintains a set of discovery providers and sends each update to a map channel.
 // Targets are grouped by the target set name.
 type Manager struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	name     string
 	httpOpts []config.HTTPClientOption
 	mtx      sync.RWMutex
@@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
 }
 
 func (m *Manager) startProvider(ctx context.Context, p *Provider) {
-	level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
+	m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
 	ctx, cancel := context.WithCancel(ctx)
 	updates := make(chan []*targetgroup.Group)
 
@@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ
 		case tgs, ok := <-updates:
 			m.metrics.ReceivedUpdates.Inc()
 			if !ok {
-				level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name)
+				m.logger.Debug("Discoverer channel closed", "provider", p.name)
 				// Wait for provider cancellation to ensure targets are cleaned up when expected.
 				<-ctx.Done()
 				return
@@ -364,7 +364,7 @@ func (m *Manager) sender() {
 			case m.syncCh <- m.allGroups():
 			default:
 				m.metrics.DelayedUpdates.Inc()
-				level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle")
+				m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle")
 				select {
 				case m.triggerSend <- struct{}{}:
 				default:
@@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
 		}
 		typ := cfg.Name()
 		d, err := cfg.NewDiscoverer(DiscovererOptions{
-			Logger:            log.With(m.logger, "discovery", typ, "config", setName),
+			Logger:            m.logger.With("discovery", typ, "config", setName),
 			HTTPClientOptions: m.httpOpts,
 			Metrics:           m.sdMetrics[typ],
 		})
		if err != nil {
-			level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName)
+			m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName)
 			failed++
 			return
 		}
@@ -22,10 +22,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -675,7 +675,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 
@@ -791,7 +791,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -828,7 +828,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -868,7 +868,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -911,7 +911,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -979,7 +979,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1023,7 +1023,7 @@ func TestDiscovererConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1060,7 +1060,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1141,7 +1141,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1202,7 +1202,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1454,7 +1454,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
 	reg := prometheus.NewRegistry()
 	_, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	require.NotNil(t, discoveryManager)
 	discoveryManager.updatert = 100 * time.Millisecond
 	go discoveryManager.Run()
@@ -1551,7 +1551,7 @@ func TestUnregisterMetrics(t *testing.T) {
 
 	refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
 
-	discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics)
+	discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
 	// discoveryManager will be nil if there was an error configuring metrics.
 	require.NotNil(t, discoveryManager)
 	// Unregister all metrics.
@@ -19,6 +19,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"math/rand"
 	"net"
 	"net/http"
@@ -27,7 +28,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -140,7 +140,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Marathon Discovery.
-func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*marathonMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -16,6 +16,7 @@ package moby
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
@@ -28,7 +29,6 @@ import (
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -128,7 +128,7 @@ type DockerDiscovery struct {
 }
 
 // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
-func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
+func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
 	m, ok := metrics.(*dockerMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -19,9 +19,9 @@ import (
 	"sort"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -226,7 +226,7 @@ host: %s
 	require.NoError(t, metrics.Register())
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
-	d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -16,13 +16,13 @@ package moby
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"time"
 
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/client"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -125,7 +125,7 @@ type Discovery struct {
 }
 
 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*dockerswarmMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -349,7 +349,7 @@ filters:
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@@ -18,9 +18,9 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 
@@ -48,7 +48,7 @@ host: %s
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()
 
-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 
 	ctx := context.Background()
@ -17,12 +17,12 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
nomad "github.com/hashicorp/nomad/api"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/config"
|
||||
|
@ -121,7 +121,7 @@ type Discovery struct {
|
|||
}
|
||||
|
||||
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
|
||||
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
|
||||
m, ok := metrics.(*nomadMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
|
|
|
@ -21,9 +21,9 @@ import (
|
|||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery"
|
||||
|
@ -160,7 +160,7 @@ func TestNomadSDRefresh(t *testing.T) {
|
|||
defer metrics.Unregister()
|
||||
defer refreshMetrics.Unregister()
|
||||
|
||||
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
|
||||
d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
|
||||
require.NoError(t, err)
|
||||
|
||||
tgs, err := d.refresh(context.Background())
|
||||
|
|
|
@ -16,10 +16,10 @@ package openstack
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
|
||||
|
@ -43,14 +43,14 @@ type HypervisorDiscovery struct {
|
|||
provider *gophercloud.ProviderClient
|
||||
authOpts *gophercloud.AuthOptions
|
||||
region string
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
port int
|
||||
availability gophercloud.Availability
|
||||
}
|
||||
|
||||
// newHypervisorDiscovery returns a new hypervisor discovery.
|
||||
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
|
||||
port int, region string, availability gophercloud.Availability, l log.Logger,
|
||||
port int, region string, availability gophercloud.Availability, l *slog.Logger,
|
||||
) *HypervisorDiscovery {
|
||||
return &HypervisorDiscovery{
|
||||
provider: provider, authOpts: opts,
|
||||
|
|
|
@ -93,6 +93,5 @@ func TestOpenstackSDHypervisorRefreshWithDoneContext(t *testing.T) {
|
|||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
_, err := hypervisor.refresh(ctx)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
|
||||
require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
|
||||
}
|
||||
|
|
|
@ -16,17 +16,17 @@ package openstack
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/go-kit/log/level"
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
|
||||
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
|
||||
"github.com/gophercloud/gophercloud/pagination"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/promslog"
|
||||
|
||||
"github.com/prometheus/prometheus/discovery/targetgroup"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
|
@ -52,7 +52,7 @@ type InstanceDiscovery struct {
|
|||
provider *gophercloud.ProviderClient
|
||||
authOpts *gophercloud.AuthOptions
|
||||
region string
|
||||
logger log.Logger
|
||||
logger *slog.Logger
|
||||
port int
|
||||
allTenants bool
|
||||
availability gophercloud.Availability
|
||||
|
@ -60,10 +60,10 @@ type InstanceDiscovery struct {
|
|||
|
||||
// NewInstanceDiscovery returns a new instance discovery.
|
||||
func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
|
||||
port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger,
|
||||
port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger,
|
||||
) *InstanceDiscovery {
|
||||
if l == nil {
|
||||
l = log.NewNopLogger()
|
||||
l = promslog.NewNopLogger()
|
||||
}
|
||||
return &InstanceDiscovery{
|
||||
provider: provider, authOpts: opts,
|
||||
|
@ -134,7 +134,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
|
||||
for _, s := range instanceList {
|
||||
if len(s.Addresses) == 0 {
|
||||
level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
|
||||
i.logger.Info("Got no IP address", "instance", s.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -151,7 +151,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
if !nameOk {
|
||||
flavorID, idOk := s.Flavor["id"].(string)
|
||||
if !idOk {
|
||||
level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string")
|
||||
i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string")
|
||||
continue
|
||||
}
|
||||
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
|
||||
|
@ -171,22 +171,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
|
|||
for pool, address := range s.Addresses {
|
||||
md, ok := address.([]interface{})
|
||||
if !ok {
|
||||
level.Warn(i.logger).Log("msg", "Invalid type for address, expected array")
|
||||
i.logger.Warn("Invalid type for address, expected array")
|
||||
continue
|
||||
}
|
||||
if len(md) == 0 {
|
||||
level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID)
|
||||
i.logger.Debug("Got no IP address", "instance", s.ID)
|
||||
continue
|
||||
}
|
||||
for _, address := range md {
|
||||
md1, ok := address.(map[string]interface{})
|
||||
if !ok {
|
||||
level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict")
|
||||
i.logger.Warn("Invalid type for address, expected dict")
|
||||
continue
|
||||
}
|
||||
addr, ok := md1["addr"].(string)
|
||||
if !ok {
|
||||
level.Warn(i.logger).Log("msg", "Invalid type for address, expected string")
|
||||
i.logger.Warn("Invalid type for address, expected string")
|
||||
continue
|
||||
}
|
||||
if _, ok := floatingIPPresent[addr]; ok {
|
||||
|
|
|
@ -134,6 +134,5 @@ func TestOpenstackSDInstanceRefreshWithDoneContext(t *testing.T) {
|
|||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
_, err := hypervisor.refresh(ctx)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
|
||||
require.ErrorContains(t, err, context.Canceled.Error(), "%q doesn't contain %q", err, context.Canceled)
|
||||
}
|
||||
|
|
|
@ -17,10 +17,10 @@ import (
|
|||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/log"
|
||||
"github.com/gophercloud/gophercloud"
|
||||
"github.com/gophercloud/gophercloud/openstack"
|
||||
"github.com/mwitkow/go-conntrack"
|
||||
|
@ -142,7 +142,7 @@ type refresher interface {
|
|||
}
|
||||
|
||||
// NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets.
|
||||
func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
|
||||
m, ok := metrics.(*openstackMetrics)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid discovery metrics type")
|
||||
|
@ -163,7 +163,7 @@ func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetr
|
|||
), nil
|
||||
}
|
||||
|
||||
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
|
||||
func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
|
||||
var opts gophercloud.AuthOptions
|
||||
if conf.IdentityEndpoint == "" {
|
||||
var err error
|
||||
|
|
|
@@ -16,13 +16,12 @@ package ovhcloud
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/netip"
 	"net/url"
 	"path"
 	"strconv"

-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/ovh/go-ovh/ovh"
 	"github.com/prometheus/common/model"

@@ -55,10 +54,10 @@ type dedicatedServer struct {
 type dedicatedServerDiscovery struct {
 	*refresh.Discovery
 	config *SDConfig
-	logger log.Logger
+	logger *slog.Logger
 }

-func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery {
+func newDedicatedServerDiscovery(conf *SDConfig, logger *slog.Logger) *dedicatedServerDiscovery {
 	return &dedicatedServerDiscovery{config: conf, logger: logger}
 }

@@ -115,10 +114,7 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
 	for _, dedicatedServerName := range dedicatedServerList {
 		dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName)
 		if err != nil {
-			err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error())
-			if err != nil {
-				return nil, err
-			}
+			d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error())
 			continue
 		}
 		dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer)
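Worth noting in the ovhcloud hunks above: go-kit's `Log` returns an `error`, and the old code aborted the whole refresh when a warning could not be written. `slog` methods return nothing, so that failure path disappears. A hedged reduction of the pattern (`fetchDetails` and `collect` are invented for illustration):

```go
// Hypothetical reduction of the ovhcloud refresh pattern above.
package main

import (
	"fmt"
	"log/slog"
)

func fetchDetails(name string) (string, error) { return "", fmt.Errorf("boom") }

func collect(logger *slog.Logger, names []string) []string {
	var out []string
	for _, name := range names {
		detail, err := fetchDetails(name)
		if err != nil {
			// slog.Logger.Warn returns nothing, so a failed log line can no
			// longer turn a partial refresh into a hard error.
			logger.Warn("could not get details", "name", name, "err", err)
			continue
		}
		out = append(out, detail)
	}
	return out
}

func main() {
	collect(slog.Default(), []string{"a", "b"})
}
```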
@@ -21,8 +21,8 @@ import (
 	"os"
 	"testing"

-	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"
 )
@@ -41,7 +41,7 @@ application_secret: %s
 consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest)

 	require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))
-	d, err := newRefresher(&cfg, log.NewNopLogger())
+	d, err := newRefresher(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 	ctx := context.Background()
 	targetGroups, err := d.refresh(ctx)

@@ -17,10 +17,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/netip"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/ovh/go-ovh/ovh"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -137,7 +137,7 @@ func parseIPList(ipList []string) ([]netip.Addr, error) {
 	return ipAddresses, nil
 }

-func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
+func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
 	switch conf.Service {
 	case "vps":
 		return newVpsDiscovery(conf, logger), nil
@@ -148,7 +148,7 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
 }

 // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*ovhcloudMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -20,11 +20,11 @@ import (

 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"gopkg.in/yaml.v2"

 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/util/testutil"
 )

 var (
@@ -121,7 +121,7 @@ func TestParseIPs(t *testing.T) {

 func TestDiscoverer(t *testing.T) {
 	conf, _ := getMockConf("vps")
-	logger := testutil.NewLogger(t)
+	logger := promslog.NewNopLogger()

 	reg := prometheus.NewRegistry()
 	refreshMetrics := discovery.NewRefreshMetrics(reg)

@@ -16,13 +16,12 @@ package ovhcloud
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net/netip"
 	"net/url"
 	"path"
 	"strconv"

-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/ovh/go-ovh/ovh"
 	"github.com/prometheus/common/model"

@@ -68,10 +67,10 @@ type virtualPrivateServer struct {
 type vpsDiscovery struct {
 	*refresh.Discovery
 	config *SDConfig
-	logger log.Logger
+	logger *slog.Logger
 }

-func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery {
+func newVpsDiscovery(conf *SDConfig, logger *slog.Logger) *vpsDiscovery {
 	return &vpsDiscovery{config: conf, logger: logger}
 }

@@ -133,10 +132,7 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
 	for _, vpsName := range vpsList {
 		vpsDetailed, err := getVpsDetails(client, vpsName)
 		if err != nil {
-			err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error())
-			if err != nil {
-				return nil, err
-			}
+			d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error())
 			continue
 		}
 		vpsDetailedList = append(vpsDetailedList, *vpsDetailed)

@@ -23,8 +23,8 @@ import (

 	yaml "gopkg.in/yaml.v2"

-	"github.com/go-kit/log"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 )
@@ -43,7 +43,7 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr

 	require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))

-	d, err := newRefresher(&cfg, log.NewNopLogger())
+	d, err := newRefresher(&cfg, promslog.NewNopLogger())
 	require.NoError(t, err)
 	ctx := context.Background()
 	targetGroups, err := d.refresh(ctx)

@@ -19,6 +19,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log/slog"
 	"net"
 	"net/http"
 	"net/url"
@@ -27,11 +28,11 @@ import (
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/grafana/regexp"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/prometheus/common/version"

 	"github.com/prometheus/prometheus/discovery"
@@ -138,14 +139,14 @@ type Discovery struct {
 }

 // NewDiscovery returns a new PuppetDB discovery for the given config.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*puppetdbMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
 	}

 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}

 	client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")

@@ -22,10 +22,10 @@ import (
 	"testing"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/discovery"
@@ -70,7 +70,7 @@ func TestPuppetSlashInURL(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	require.Equal(t, apiURL, d.url)

@@ -94,7 +94,7 @@ func TestPuppetDBRefresh(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
@@ -142,7 +142,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
@@ -201,7 +201,7 @@ func TestPuppetDBInvalidCode(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
@@ -229,7 +229,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) {
 	metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
 	require.NoError(t, metrics.Register())

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)

 	ctx := context.Background()
@@ -16,17 +16,17 @@ package refresh
 import (
 	"context"
 	"errors"
+	"log/slog"
 	"time"

-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
+	"github.com/prometheus/common/promslog"

 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )

 type Options struct {
-	Logger   log.Logger
+	Logger   *slog.Logger
 	Mech     string
 	Interval time.Duration
 	RefreshF func(ctx context.Context) ([]*targetgroup.Group, error)
@@ -35,7 +35,7 @@ type Options struct {

 // Discovery implements the Discoverer interface.
 type Discovery struct {
-	logger   log.Logger
+	logger   *slog.Logger
 	interval time.Duration
 	refreshf func(ctx context.Context) ([]*targetgroup.Group, error)
 	metrics  *discovery.RefreshMetrics
@@ -45,9 +45,9 @@ type Discovery struct {
 func NewDiscovery(opts Options) *Discovery {
 	m := opts.MetricsInstantiator.Instantiate(opts.Mech)

-	var logger log.Logger
+	var logger *slog.Logger
 	if opts.Logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	} else {
 		logger = opts.Logger
 	}
@@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	tgs, err := d.refresh(ctx)
 	if err != nil {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+			d.logger.Error("Unable to refresh target groups", "err", err.Error())
 		}
 	} else {
 		select {
@@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 	tgs, err := d.refresh(ctx)
 	if err != nil {
 		if !errors.Is(ctx.Err(), context.Canceled) {
-			level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error())
+			d.logger.Error("Unable to refresh target groups", "err", err.Error())
 		}
 		continue
 	}
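`refresh.Options` keeps accepting a nil logger; only the fallback changes, from go-kit's nop logger to promslog's. A hedged sketch of the same defaulting idiom outside the Prometheus codebase (`Options` and `New` here are illustrative stand-ins, not the actual refresh package API):

```go
// Minimal sketch of the nil-logger defaulting idiom used above.
package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

type Options struct {
	Logger *slog.Logger
}

func New(opts Options) *slog.Logger {
	if opts.Logger == nil {
		// promslog.NewNopLogger returns a *slog.Logger that discards
		// everything, so callers never need a nil check before logging.
		return promslog.NewNopLogger()
	}
	return opts.Logger
}

func main() {
	logger := New(Options{})
	logger.Info("safe to call even though no logger was supplied")
}
```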
@@ -17,12 +17,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"os"
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -185,7 +185,7 @@ func init() {
 // the Discoverer interface.
 type Discovery struct{}

-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
 	m, ok := metrics.(*scalewayMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -19,12 +19,12 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/mwitkow/go-conntrack"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -146,7 +146,7 @@ type Discovery struct {
 }

 // New returns a new Discovery which periodically refreshes its targets.
-func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*tritonMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -21,7 +21,6 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"strconv"
-	"strings"
 	"testing"

 	"github.com/prometheus/client_golang/prometheus"
@@ -182,8 +181,7 @@ func TestTritonSDRefreshNoServer(t *testing.T) {
 	td, m, _ := newTritonDiscovery(conf)

 	_, err := td.refresh(context.Background())
-	require.Error(t, err)
-	require.True(t, strings.Contains(err.Error(), "an error occurred when requesting targets from the discovery endpoint"))
+	require.ErrorContains(t, err, "an error occurred when requesting targets from the discovery endpoint")
 	m.Unregister()
 }
@@ -193,8 +191,7 @@ func TestTritonSDRefreshCancelled(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 	_, err := td.refresh(ctx)
-	require.Error(t, err)
-	require.True(t, strings.Contains(err.Error(), context.Canceled.Error()))
+	require.ErrorContains(t, err, context.Canceled.Error())
 	m.Unregister()
 }

@@ -17,6 +17,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net/http"
 	"net/url"
 	"path"
@@ -24,7 +25,6 @@ import (
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/kolo/xmlrpc"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
@@ -109,7 +109,7 @@ type Discovery struct {
 	entitlement string
 	separator   string
 	interval    time.Duration
-	logger      log.Logger
+	logger      *slog.Logger
 }

 // NewDiscovererMetrics implements discovery.Config.
@@ -212,7 +212,7 @@ func getEndpointInfoForSystems(
 }

 // NewDiscovery returns a uyuni discovery for the given configuration.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*uyuniMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -16,13 +16,13 @@ package vultr
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"net"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -114,7 +114,7 @@ type Discovery struct {
 }

 // NewDiscovery returns a new Discovery which periodically refreshes its targets.
-func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
+func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
 	m, ok := metrics.(*vultrMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")

@@ -19,9 +19,9 @@ import (
 	"net/url"
 	"testing"

-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"

 	"github.com/prometheus/prometheus/discovery"
@@ -57,7 +57,7 @@ func TestVultrSDRefresh(t *testing.T) {
 	defer metrics.Unregister()
 	defer refreshMetrics.Unregister()

-	d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics)
+	d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdMock.Mock.Endpoint())
 	require.NoError(t, err)
@@ -52,16 +52,14 @@ func TestMakeXDSResourceHttpEndpointEmptyServerURLScheme(t *testing.T) {
 	endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("127.0.0.1"), "monitoring")

 	require.Empty(t, endpointURL)
-	require.Error(t, err)
-	require.Equal(t, "invalid xDS server URL", err.Error())
+	require.EqualError(t, err, "invalid xDS server URL")
 }

 func TestMakeXDSResourceHttpEndpointEmptyServerURLHost(t *testing.T) {
 	endpointURL, err := makeXDSResourceHTTPEndpointURL(ProtocolV3, urlMustParse("grpc://127.0.0.1"), "monitoring")

 	require.Empty(t, endpointURL)
-	require.Error(t, err)
-	require.Contains(t, err.Error(), "must be either 'http' or 'https'")
+	require.ErrorContains(t, err, "must be either 'http' or 'https'")
 }

 func TestMakeXDSResourceHttpEndpoint(t *testing.T) {
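A second cleanup threads through the tests above and below: two-step assertions (`require.Error` plus a string check on `err.Error()`) collapse into the dedicated testify helpers, which assert non-nilness and the message in one call. A small self-contained sketch of the equivalence (`doThing` is invented for illustration):

```go
// Sketch of the testify assertion cleanup applied in these test hunks.
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func doThing() error { return errors.New("invalid xDS server URL") }

func TestDoThing(t *testing.T) {
	err := doThing()
	// Each call asserts both that err is non-nil and what it says:
	require.EqualError(t, err, "invalid xDS server URL") // exact match
	require.ErrorContains(t, err, "xDS server")          // substring match
}
```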
@@ -15,14 +15,14 @@ package xds
 import (
 	"fmt"
+	"log/slog"
 	"net/url"
 	"time"

-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"google.golang.org/protobuf/types/known/anypb"

 	"github.com/prometheus/prometheus/discovery"
@@ -99,7 +99,7 @@ func (c *KumaSDConfig) SetDirectory(dir string) {
 func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
 	logger := opts.Logger
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}

 	return NewKumaHTTPDiscovery(c, logger, opts.Metrics)
@@ -158,7 +158,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
 	return targets, nil
 }

-func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
+func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
 	m, ok := metrics.(*xdsMetrics)
 	if !ok {
 		return nil, fmt.Errorf("invalid discovery metrics type")
@@ -170,7 +170,7 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discove
 		var err error
 		clientID, err = osutil.GetFQDN()
 		if err != nil {
-			level.Debug(logger).Log("msg", "error getting FQDN", "err", err)
+			logger.Debug("error getting FQDN", "err", err)
 			clientID = "prometheus"
 		}
 	}

@@ -23,13 +23,14 @@ package xds

 import (
 	context "context"
+	reflect "reflect"
+	sync "sync"

 	v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
 	_ "github.com/envoyproxy/protoc-gen-validate/validate"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	reflect "reflect"
-	sync "sync"
 )

 const (

@@ -201,9 +201,8 @@ func TestKumaMadsV1ResourceParserInvalidResources(t *testing.T) {
 	}}
 	groups, err := kumaMadsV1ResourceParser(resources, KumaMadsV1ResourceTypeURL)
 	require.Nil(t, groups)
 	require.Error(t, err)
-
-	require.Contains(t, err.Error(), "cannot parse")
+	require.ErrorContains(t, err, "cannot parse")
 }

 func TestNewKumaHTTPDiscovery(t *testing.T) {

@@ -15,11 +15,10 @@ package xds

 import (
 	"context"
+	"log/slog"
 	"time"

 	v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"google.golang.org/protobuf/encoding/protojson"
@@ -104,7 +103,7 @@ type fetchDiscovery struct {
 	refreshInterval time.Duration

 	parseResources resourceParser
-	logger         log.Logger
+	logger         *slog.Logger

 	metrics *xdsMetrics
 }
@@ -140,7 +139,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou
 	}

 	if err != nil {
-		level.Error(d.logger).Log("msg", "error parsing resources", "err", err)
+		d.logger.Error("error parsing resources", "err", err)
 		d.metrics.fetchFailuresCount.Inc()
 		return
 	}
@@ -153,12 +152,12 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou

 	parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl)
 	if err != nil {
-		level.Error(d.logger).Log("msg", "error parsing resources", "err", err)
+		d.logger.Error("error parsing resources", "err", err)
 		d.metrics.fetchFailuresCount.Inc()
 		return
 	}

-	level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets))
+	d.logger.Debug("Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets))

 	select {
 	case <-ctx.Done():

@@ -22,9 +22,9 @@ import (
 	"time"

 	v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
-	"github.com/go-kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 	"google.golang.org/protobuf/types/known/anypb"
@@ -90,7 +90,7 @@ func constantResourceParser(targets []model.LabelSet, err error) resourceParser
 	}
 }

-var nopLogger = log.NewNopLogger()
+var nopLogger = promslog.NewNopLogger()

 type testResourceClient struct {
 	resourceTypeURL string

@@ -18,15 +18,16 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"log/slog"
 	"net"
 	"strconv"
 	"strings"
 	"time"

-	"github.com/go-kit/log"
 	"github.com/go-zookeeper/zk"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"

 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -146,16 +147,16 @@ type Discovery struct {
 	treeCaches []*treecache.ZookeeperTreeCache

 	parse  func(data []byte, path string) (model.LabelSet, error)
-	logger log.Logger
+	logger *slog.Logger
 }

 // NewNerveDiscovery returns a new Discovery for the given Nerve config.
-func NewNerveDiscovery(conf *NerveSDConfig, logger log.Logger) (*Discovery, error) {
+func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) {
 	return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember)
 }

 // NewServersetDiscovery returns a new Discovery for the given serverset config.
-func NewServersetDiscovery(conf *ServersetSDConfig, logger log.Logger) (*Discovery, error) {
+func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) {
 	return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember)
 }
@@ -165,11 +166,11 @@ func NewDiscovery(
 	srvs []string,
 	timeout time.Duration,
 	paths []string,
-	logger log.Logger,
+	logger *slog.Logger,
 	pf func(data []byte, path string) (model.LabelSet, error),
 ) (*Discovery, error) {
 	if logger == nil {
-		logger = log.NewNopLogger()
+		logger = promslog.NewNopLogger()
 	}

 	conn, _, err := zk.Connect(
@@ -796,14 +796,17 @@ The following meta labels are available on targets during [relabeling](#relabel_
 services:
   [ - <string> ]

-# See https://www.consul.io/api/catalog.html#list-nodes-for-service to know more
-# about the possible filters that can be used.
+# A Consul Filter expression used to filter the catalog results
+# See https://www.consul.io/api-docs/catalog#list-services to know more
+# about the filter expressions that can be used.
 [ filter: <string> ]

+# The `tags` and `node_meta` fields are deprecated in Consul in favor of `filter`.
 # An optional list of tags used to filter nodes for a given service. Services must contain all tags in the list.
 tags:
   [ - <string> ]

-# Node metadata key/value pairs to filter nodes for a given service.
+# Node metadata key/value pairs to filter nodes for a given service. As of Consul 1.14, consider `filter` instead.
 [ node_meta:
   [ <string>: <string> ... ] ]

@@ -59,7 +59,7 @@ timestamps are always represented as Unix timestamps in seconds.
 * `<series_selector>`: Prometheus [time series
   selectors](basics.md#time-series-selectors) like `http_requests_total` or
   `http_requests_total{method=~"(GET|POST)"}` and need to be URL-encoded.
-* `<duration>`: [Prometheus duration strings](basics.md#time-durations).
+* `<duration>`: [the subset of Prometheus float literals using time units](basics.md#float-literals-and-time-durations).
   For example, `5m` refers to a duration of 5 minutes.
 * `<bool>`: boolean values (strings `true` and `false`).
@@ -1393,3 +1393,69 @@ Enable the OTLP receiver by setting
 endpoint is `/api/v1/otlp/v1/metrics`.

 *New in v2.47*
+
+## Notifications
+
+The following endpoints provide information about active status notifications concerning the Prometheus server itself.
+Notifications are used in the web UI.
+
+These endpoints are **experimental**. They may change in the future.
+
+### Active Notifications
+
+The `/api/v1/notifications` endpoint returns a list of all currently active notifications.
+
+```
+GET /api/v1/notifications
+```
+
+Example:
+
+```
+$ curl http://localhost:9090/api/v1/notifications
+{
+  "status": "success",
+  "data": [
+    {
+      "text": "Prometheus is shutting down and gracefully stopping all operations.",
+      "date": "2024-10-07T12:33:08.551376578+02:00",
+      "active": true
+    }
+  ]
+}
+```
+
+*New in v3.0*
+
+### Live Notifications
+
+The `/api/v1/notifications/live` endpoint streams live notifications as they occur, using [Server-Sent Events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events). Deleted notifications are sent with `active: false`. Active notifications will be sent when connecting to the endpoint.
+
+```
+GET /api/v1/notifications/live
+```
+
+Example:
+
+```
+$ curl http://localhost:9090/api/v1/notifications/live
+data: {
+  "status": "success",
+  "data": [
+    {
+      "text": "Prometheus is shutting down and gracefully stopping all operations.",
+      "date": "2024-10-07T12:33:08.551376578+02:00",
+      "active": true
+    }
+  ]
+}
+```
+
+**Note:** The `/notifications/live` endpoint will return a `204 No Content` response if the maximum number of subscribers has been reached. You can set the maximum number of listeners with the flag `--web.max-notifications-subscribers`, which defaults to 16.
+
+```
+GET /api/v1/notifications/live
+204 No Content
+```
+
+*New in v3.0*
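Since the live endpoint added above is plain Server-Sent Events, any HTTP client that reads line-delimited `data:` payloads can consume it. A hedged Go sketch using only the standard library (the URL matches the docs above; real code should reconnect and JSON-decode the payloads):

```go
// Minimal SSE consumer for the live notifications endpoint described above.
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	resp, err := http.Get("http://localhost:9090/api/v1/notifications/live")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Per the docs, a 204 means the subscriber limit was reached.
	if resp.StatusCode == http.StatusNoContent {
		fmt.Println("subscriber limit reached (204 No Content)")
		return
	}

	// SSE events arrive as "data:"-prefixed lines separated by blank lines.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if line := scanner.Text(); strings.HasPrefix(line, "data:") {
			fmt.Println(strings.TrimSpace(strings.TrimPrefix(line, "data:")))
		}
	}
}
```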
@@ -35,8 +35,9 @@ evaluate to one of four types:

 Depending on the use-case (e.g. when graphing vs. displaying the output of an
 expression), only some of these types are legal as the result of a
-user-specified expression. For example, an expression that returns an instant
-vector is the only type which can be graphed.
+user-specified expression.
+For [instant queries](api.md#instant-queries), any of the above data types are allowed as the root of the expression.
+[Range queries](api.md/#range-queries) only support scalar-typed and instant-vector-typed expressions.

 _Notes about the experimental native histograms:_
@@ -68,9 +69,10 @@ Example:
     'these are unescaped: \n \\ \t'
     `these are not unescaped: \n ' " \t`

-### Float literals
+### Float literals and time durations

-Scalar float values can be written as literal integer or floating-point numbers in the format (whitespace only included for better readability):
+Scalar float values can be written as literal integer or floating-point numbers
+in the format (whitespace only included for better readability):

     [-+]?(
         [0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?
@@ -87,16 +89,53 @@ Examples:
     0x8f
     -Inf
     NaN

-
-As of version 2.54, float literals can also be represented using the syntax of time durations, where the time duration is converted into a float value corresponding to the number of seconds the time duration represents. This is an experimental feature and might still change.
+Additionally, underscores (`_`) can be used in between decimal or hexadecimal
+digits to improve readability.

 Examples:

-    1s # Equivalent to 1.0
-    2m # Equivalent to 120.0
-    1ms # Equivalent to 0.001
+    1_000_000
+    .123_456_789
+    0x_53_AB_F3_82
+
+Float literals are also used to specify durations in seconds. For convenience,
+decimal integer numbers may be combined with the following
+time units:
+
+* `ms` – milliseconds
+* `s` – seconds – 1s equals 1000ms
+* `m` – minutes – 1m equals 60s (ignoring leap seconds)
+* `h` – hours – 1h equals 60m
+* `d` – days – 1d equals 24h (ignoring so-called daylight saving time)
+* `w` – weeks – 1w equals 7d
+* `y` – years – 1y equals 365d (ignoring leap days)
+
+Suffixing a decimal integer number with one of the units above is a different
+representation of the equivalent number of seconds as a bare float literal.
+
+Examples:
+
+    1s # Equivalent to 1.
+    2m # Equivalent to 120.
+    1ms # Equivalent to 0.001.
+    -2h # Equivalent to -7200.
+
+The following examples do _not_ work:
+
+    0xABm # No suffixing of hexadecimal numbers.
+    1.5h # Time units cannot be combined with a floating point.
+    +Infd # No suffixing of ±Inf or NaN.
+
+Multiple units can be combined by concatenation of suffixed integers. Units
+must be ordered from the longest to the shortest. A given unit must only appear
+once per float literal.
+
+Examples:
+
+    1h30m # Equivalent to 5400s and thus 5400.
+    12h34m56s # Equivalent to 45296s and thus 45296.
+    54s321ms # Equivalent to 54.321.

 ## Time series selectors

@@ -208,53 +247,22 @@ syntax](https://github.com/google/re2/wiki/Syntax).
 ### Range Vector Selectors

 Range vector literals work like instant vector literals, except that they
-select a range of samples back from the current instant. Syntactically, a [time
-duration](#time-durations) is appended in square brackets (`[]`) at the end of
-a vector selector to specify how far back in time values should be fetched for
-each resulting range vector element. The range is a left-open and right-closed interval,
-i.e. samples with timestamps coinciding with the left boundary of the range are excluded from the selection,
-while samples coinciding with the right boundary of the range are included in the selection.
+select a range of samples back from the current instant. Syntactically, a
+[float literal](#float-literals-and-time-durations) is appended in square
+brackets (`[]`) at the end of a vector selector to specify for how many seconds
+back in time values should be fetched for each resulting range vector element.
+Commonly, the float literal uses the syntax with one or more time units, e.g.
+`[5m]`. The range is a left-open and right-closed interval, i.e. samples with
+timestamps coinciding with the left boundary of the range are excluded from the
+selection, while samples coinciding with the right boundary of the range are
+included in the selection.

-In this example, we select all the values recorded less than 5m ago for all time series
-that have the metric name `http_requests_total` and
-a `job` label set to `prometheus`:
+In this example, we select all the values recorded less than 5m ago for all
+time series that have the metric name `http_requests_total` and a `job` label
+set to `prometheus`:

     http_requests_total{job="prometheus"}[5m]

-### Time Durations
-
-Time durations are specified as a number, followed immediately by one of the
-following units:
-
-* `ms` - milliseconds
-* `s` - seconds
-* `m` - minutes
-* `h` - hours
-* `d` - days - assuming a day always has 24h
-* `w` - weeks - assuming a week always has 7d
-* `y` - years - assuming a year always has 365d<sup>1</sup>
-
-<sup>1</sup> For days in a year, the leap day is ignored, and conversely, for a minute, a leap second is ignored.
-
-Time durations can be combined by concatenation. Units must be ordered from the
-longest to the shortest. A given unit must only appear once in a time duration.
-
-Here are some examples of valid time durations:
-
-    5h
-    1h30m
-    5m
-    10s
-
-
-As of version 2.54, time durations can also be represented using the syntax of float literals, implying the number of seconds of the time duration. This is an experimental feature and might still change.
-
-Examples:
-
-    1.0 # Equivalent to 1s
-    0.001 # Equivalent to 1ms
-    120 # Equivalent to 2m
-
 ### Offset modifier

 The `offset` modifier allows changing the time offset for individual

@@ -337,7 +345,7 @@ Note that the `@` modifier allows a query to look ahead of its evaluation time.

 Subquery allows you to run an instant query for a given range and resolution. The result of a subquery is a range vector.

-Syntax: `<instant_query> '[' <range> ':' [<resolution>] ']' [ @ <float_literal> ] [ offset <duration> ]`
+Syntax: `<instant_query> '[' <range> ':' [<resolution>] ']' [ @ <float_literal> ] [ offset <float_literal> ]`

 * `<resolution>` is optional. Default is the global evaluation interval.

@@ -17,7 +17,8 @@ Request are made to the following endpoint.

 ### Samples

-This returns a message that includes a list of raw samples.
+This returns a message that includes a list of raw samples matching the
+requested query.

 ### Streamed Chunks
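The duration-to-seconds equivalences documented above can be checked programmatically: `github.com/prometheus/common/model` exposes the same suffixed-integer duration grammar that PromQL accepts. A small sketch (output values follow the examples in the docs hunk above):

```go
// Sketch verifying the duration/float equivalences documented above, using
// the duration grammar from github.com/prometheus/common/model.
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	for _, s := range []string{"1s", "2m", "1ms", "1h30m", "12h34m56s", "54s321ms"} {
		d, err := model.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		// time.Duration(d).Seconds() yields the bare float literal form,
		// e.g. 1h30m -> 5400, 54s321ms -> 54.321.
		fmt.Printf("%-10s = %v\n", s, time.Duration(d).Seconds())
	}
}
```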
@@ -18,6 +18,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log/slog"
 	"net"
 	"net/http"
 	"os"
@@ -26,10 +27,9 @@ import (
 	"time"

 	"github.com/alecthomas/kingpin/v2"
-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
+	"github.com/prometheus/common/promslog"

 	prom_discovery "github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
@@ -41,7 +41,7 @@ var (
 	a             = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.")
 	outputFile    = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String()
 	listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String()
-	logger        log.Logger
+	logger        *slog.Logger

 	// addressLabel is the name for the label containing a target's address.
 	addressLabel = model.MetaLabelPrefix + "consul_address"
@@ -90,7 +90,7 @@ type discovery struct {
 	address         string
 	refreshInterval int
 	tagSeparator    string
-	logger          log.Logger
+	logger          *slog.Logger
 	oldSourceList   map[string]bool
 }
@@ -164,7 +164,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		var srvs map[string][]string
 		resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))
 		if err != nil {
-			level.Error(d.logger).Log("msg", "Error getting services list", "err", err)
+			d.logger.Error("Error getting services list", "err", err)
 			time.Sleep(time.Duration(d.refreshInterval) * time.Second)
 			continue
 		}
@@ -173,7 +173,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		io.Copy(io.Discard, resp.Body)
 		resp.Body.Close()
 		if err != nil {
-			level.Error(d.logger).Log("msg", "Error reading services list", "err", err)
+			d.logger.Error("Error reading services list", "err", err)
 			time.Sleep(time.Duration(d.refreshInterval) * time.Second)
 			continue
 		}
@@ -181,7 +181,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 		err = json.Unmarshal(b, &srvs)
 		resp.Body.Close()
 		if err != nil {
-			level.Error(d.logger).Log("msg", "Error parsing services list", "err", err)
+			d.logger.Error("Error parsing services list", "err", err)
 			time.Sleep(time.Duration(d.refreshInterval) * time.Second)
 			continue
 		}
@@ -200,13 +200,13 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
 			}
 			resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name))
 			if err != nil {
-				level.Error(d.logger).Log("msg", "Error getting services nodes", "service", name, "err", err)
+				d.logger.Error("Error getting services nodes", "service", name, "err", err)
 				break
 			}

 			tg, err := d.parseServiceNodes(resp, name)
 			if err != nil {
-				level.Error(d.logger).Log("msg", "Error parsing services nodes", "service", name, "err", err)
+				d.logger.Error("Error parsing services nodes", "service", name, "err", err)
 				break
 			}
 			tgs = append(tgs, tg)
@@ -254,8 +254,7 @@ func main() {
 		fmt.Println("err: ", err)
 		return
 	}
-	logger = log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout))
-	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
+	logger = promslog.New(&promslog.Config{})

 	ctx := context.Background()

@@ -272,7 +271,7 @@ func main() {
 	}

 	if err != nil {
-		level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err)
+		logger.Error("failed to create discovery metrics", "err", err)
 		os.Exit(1)
 	}
@@ -280,7 +279,7 @@ func main() {
 	refreshMetrics := prom_discovery.NewRefreshMetrics(reg)
 	metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics)
 	if err != nil {
-		level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err)
+		logger.Error("failed to register service discovery metrics", "err", err)
 		os.Exit(1)
 	}

@@ -18,13 +18,12 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"reflect"
 	"sort"

-	"github.com/go-kit/log"
-	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
@@ -55,7 +54,7 @@ type Adapter struct {
 	manager *discovery.Manager
 	output  string
 	name    string
-	logger  log.Logger
+	logger  *slog.Logger
 }

 func mapToArray(m map[string]*customSD) []customSD {
@@ -106,7 +105,7 @@ func (a *Adapter) refreshTargetGroups(allTargetGroups map[string][]*targetgroup.
 		a.groups = tempGroups
 		err := a.writeOutput()
 		if err != nil {
-			level.Error(log.With(a.logger, "component", "sd-adapter")).Log("err", err)
+			a.logger.With("component", "sd-adapter").Error("failed to write output", "err", err)
 		}
 	}
 }
@@ -163,7 +162,7 @@ func (a *Adapter) Run() {
 }

 // NewAdapter creates a new instance of Adapter.
-func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter {
+func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger *slog.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter {
 	return &Adapter{
 		ctx:  ctx,
 		disc: d,
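The adapter's logger setup shrinks to one call: `promslog.New` builds a ready-to-use `*slog.Logger` with timestamps and caller information, replacing the hand-rolled go-kit chain. A hedged sketch of configuring it (the level setup is illustrative; the zero-value `Config`, as used in the diff above, also works):

```go
// Sketch of promslog configuration replacing the go-kit logger chain above.
package main

import "github.com/prometheus/common/promslog"

func main() {
	level := promslog.NewLevel()
	if err := level.Set("debug"); err != nil { // accepts debug, info, warn, error
		panic(err)
	}
	logger := promslog.New(&promslog.Config{Level: level})
	logger.Info("adapter starting", "output", "custom_sd.json")
}
```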
@@ -4,7 +4,6 @@ go 1.22.0

 require (
 	github.com/alecthomas/kingpin/v2 v2.4.0
-	github.com/go-kit/log v0.2.1
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.11.6
@@ -26,6 +25,7 @@ require (
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dennwc/varint v1.0.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect