chore!: adopt log/slog, remove go-kit/log

For: #14355

This commit updates Prometheus to adopt the standard library's log/slog package
in place of go-kit/log. As part of the conversion to slog, several other
related changes were required to keep Prometheus working, including:
- removed the unused logging util func `RateLimit()`
- forward-ported the util/logging Deduper by implementing a small custom slog.Handler that performs the deduplication before chaining log calls to the underlying real slog.Logger (a sketch follows this list)
- moved some of the JSON file logging functionality to use functionality from the prometheus/common package
- refactored some of the new JSON file logging used for scraping
- changed the promql.QueryLogger interface to swap its logging methods for the relevant slog convenience methods
- updated many tests that used or replicated custom logging functionality, aiming to keep the logical intent of each test intact after the transition
- added a healthy number of `if logger == nil { $makeLogger }` conditional checks in functions where no logger was provided; old code using the go-kit/log.Logger interface had several places where nil references surfaced when calling methods like `With()` to add key/value pairs on the new *slog.Logger type (see the sketch after this list)
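
A minimal sketch of the deduplicating slog.Handler approach described above, assuming a handler keyed on the record message with a fixed repeat window; the names (`DedupeHandler`, `NewDedupeHandler`) and the keying strategy are illustrative, not the actual util/logging implementation:

```go
package logging

import (
	"context"
	"log/slog"
	"sync"
	"time"
)

// dedupeState is shared between handler copies created by WithAttrs/WithGroup
// so deduplication applies across all derived handlers.
type dedupeState struct {
	mtx  sync.Mutex
	seen map[string]time.Time
}

// DedupeHandler drops records whose message was already logged within the
// repeat window and chains everything else to the wrapped handler.
type DedupeHandler struct {
	next   slog.Handler
	repeat time.Duration
	state  *dedupeState
}

func NewDedupeHandler(next slog.Handler, repeat time.Duration) *DedupeHandler {
	return &DedupeHandler{
		next:   next,
		repeat: repeat,
		state:  &dedupeState{seen: map[string]time.Time{}},
	}
}

func (h *DedupeHandler) Enabled(ctx context.Context, level slog.Level) bool {
	return h.next.Enabled(ctx, level)
}

func (h *DedupeHandler) Handle(ctx context.Context, r slog.Record) error {
	key := r.Message // a fuller version would also fold the attrs into the key
	h.state.mtx.Lock()
	last, ok := h.state.seen[key]
	if ok && time.Since(last) < h.repeat {
		h.state.mtx.Unlock()
		return nil // duplicate within the window: drop instead of chaining
	}
	h.state.seen[key] = time.Now()
	h.state.mtx.Unlock()
	return h.next.Handle(ctx, r)
}

func (h *DedupeHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &DedupeHandler{next: h.next.WithAttrs(attrs), repeat: h.repeat, state: h.state}
}

func (h *DedupeHandler) WithGroup(name string) slog.Handler {
	return &DedupeHandler{next: h.next.WithGroup(name), repeat: h.repeat, state: h.state}
}
```

Wrapping an existing handler is then a one-liner, e.g. `slog.New(NewDedupeHandler(slog.NewJSONHandler(os.Stderr, nil), time.Minute))`.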
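
And a minimal sketch of the nil-logger guard pattern from the last bullet, assuming a hypothetical `Manager` constructor; `promslog.NewNopLogger()` is the same fallback this commit uses elsewhere:

```go
package example

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

type Manager struct {
	logger *slog.Logger
}

// NewManager falls back to a no-op logger when the caller passes nil, so that
// later calls such as logger.With(...) cannot dereference a nil *slog.Logger.
func NewManager(logger *slog.Logger) *Manager {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	return &Manager{logger: logger.With("component", "manager")}
}
```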

Signed-off-by: TJ Hoplock <t.hoplock@gmail.com>
TJ Hoplock 2024-09-09 21:41:53 -04:00
parent 65f6103539
commit 6ebfbd2d54
162 changed files with 1534 additions and 1691 deletions


@ -23,6 +23,7 @@ linters:
- usestdlibvars - usestdlibvars
- whitespace - whitespace
- loggercheck - loggercheck
- sloglint
issues: issues:
max-issues-per-linter: 0 max-issues-per-linter: 0
@ -100,8 +101,6 @@ linters-settings:
- (net/http.ResponseWriter).Write - (net/http.ResponseWriter).Write
# No need to check for errors on server's shutdown. # No need to check for errors on server's shutdown.
- (*net/http.Server).Shutdown - (*net/http.Server).Shutdown
# Never check for logger errors.
- (github.com/go-kit/log.Logger).Log
# Never check for rollback errors as Rollback() is called when a previous error was detected. # Never check for rollback errors as Rollback() is called when a previous error was detected.
- (github.com/prometheus/prometheus/storage.Appender).Rollback - (github.com/prometheus/prometheus/storage.Appender).Rollback
goimports: goimports:


@ -18,6 +18,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"math" "math"
"math/bits" "math/bits"
"net" "net"
@ -37,8 +38,6 @@ import (
"github.com/KimMachineGun/automemlimit/memlimit" "github.com/KimMachineGun/automemlimit/memlimit"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/alecthomas/units" "github.com/alecthomas/units"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/mwitkow/go-conntrack" "github.com/mwitkow/go-conntrack"
"github.com/oklog/run" "github.com/oklog/run"
@ -46,8 +45,8 @@ import (
"github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/collectors"
versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version" versioncollector "github.com/prometheus/client_golang/prometheus/collectors/version"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promlog" "github.com/prometheus/common/promslog"
promlogflag "github.com/prometheus/common/promlog/flag" promslogflag "github.com/prometheus/common/promslog/flag"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
toolkit_web "github.com/prometheus/exporter-toolkit/web" toolkit_web "github.com/prometheus/exporter-toolkit/web"
"go.uber.org/atomic" "go.uber.org/atomic"
@ -81,6 +80,45 @@ import (
"github.com/prometheus/prometheus/web" "github.com/prometheus/prometheus/web"
) )
// klogv1OutputCallDepth is the stack depth where we can find the origin of this call.
const klogv1OutputCallDepth = 6
// klogv1DefaultPrefixLength is the length of the log prefix that we have to strip out.
const klogv1DefaultPrefixLength = 53
// klogv1Writer is used in SetOutputBySeverity call below to redirect any calls
// to klogv1 to end up in klogv2.
// This is a hack to support klogv1 without use of go-kit/log. It is inspired
// by klog's upstream klogv1/v2 coexistence example:
// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go
type klogv1Writer struct{}
// Write redirects klogv1 calls to klogv2.
// This is a hack to support klogv1 without use of go-kit/log. It is inspired
// by klog's upstream klogv1/v2 coexistence example:
// https://github.com/kubernetes/klog/blob/main/examples/coexist_klog_v1_and_v2/coexist_klog_v1_and_v2.go
func (kw klogv1Writer) Write(p []byte) (n int, err error) {
	if len(p) < klogv1DefaultPrefixLength {
		klogv2.InfoDepth(klogv1OutputCallDepth, string(p))
		return len(p), nil
	}

	switch p[0] {
	case 'I':
		klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
	case 'W':
		klogv2.WarningDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
	case 'E':
		klogv2.ErrorDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
	case 'F':
		klogv2.FatalDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
	default:
		klogv2.InfoDepth(klogv1OutputCallDepth, string(p[klogv1DefaultPrefixLength:]))
	}

	return len(p), nil
}
var ( var (
appName = "prometheus" appName = "prometheus"
@ -171,82 +209,82 @@ type flagConfig struct {
prometheusURL string prometheusURL string
corsRegexString string corsRegexString string
promlogConfig promlog.Config
promqlEnableDelayedNameRemoval bool promqlEnableDelayedNameRemoval bool
promslogConfig promslog.Config
} }
// setFeatureListOptions sets the corresponding options from the featureList. // setFeatureListOptions sets the corresponding options from the featureList.
func (c *flagConfig) setFeatureListOptions(logger log.Logger) error { func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
for _, f := range c.featureList { for _, f := range c.featureList {
opts := strings.Split(f, ",") opts := strings.Split(f, ",")
for _, o := range opts { for _, o := range opts {
switch o { switch o {
case "expand-external-labels": case "expand-external-labels":
c.enableExpandExternalLabels = true c.enableExpandExternalLabels = true
level.Info(logger).Log("msg", "Experimental expand-external-labels enabled") logger.Info("Experimental expand-external-labels enabled")
case "exemplar-storage": case "exemplar-storage":
c.tsdb.EnableExemplarStorage = true c.tsdb.EnableExemplarStorage = true
level.Info(logger).Log("msg", "Experimental in-memory exemplar storage enabled") logger.Info("Experimental in-memory exemplar storage enabled")
case "memory-snapshot-on-shutdown": case "memory-snapshot-on-shutdown":
c.tsdb.EnableMemorySnapshotOnShutdown = true c.tsdb.EnableMemorySnapshotOnShutdown = true
level.Info(logger).Log("msg", "Experimental memory snapshot on shutdown enabled") logger.Info("Experimental memory snapshot on shutdown enabled")
case "extra-scrape-metrics": case "extra-scrape-metrics":
c.scrape.ExtraMetrics = true c.scrape.ExtraMetrics = true
level.Info(logger).Log("msg", "Experimental additional scrape metrics enabled") logger.Info("Experimental additional scrape metrics enabled")
case "metadata-wal-records": case "metadata-wal-records":
c.scrape.AppendMetadata = true c.scrape.AppendMetadata = true
level.Info(logger).Log("msg", "Experimental metadata records in WAL enabled, required for remote write 2.0") logger.Info("Experimental metadata records in WAL enabled, required for remote write 2.0")
case "promql-per-step-stats": case "promql-per-step-stats":
c.enablePerStepStats = true c.enablePerStepStats = true
level.Info(logger).Log("msg", "Experimental per-step statistics reporting") logger.Info("Experimental per-step statistics reporting")
case "auto-gomaxprocs": case "auto-gomaxprocs":
c.enableAutoGOMAXPROCS = true c.enableAutoGOMAXPROCS = true
level.Info(logger).Log("msg", "Automatically set GOMAXPROCS to match Linux container CPU quota") logger.Info("Automatically set GOMAXPROCS to match Linux container CPU quota")
case "auto-reload-config": case "auto-reload-config":
c.enableAutoReload = true c.enableAutoReload = true
if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 { if s := time.Duration(c.autoReloadInterval).Seconds(); s > 0 && s < 1 {
c.autoReloadInterval, _ = model.ParseDuration("1s") c.autoReloadInterval, _ = model.ParseDuration("1s")
} }
level.Info(logger).Log("msg", fmt.Sprintf("Enabled automatic configuration file reloading. Checking for configuration changes every %s.", c.autoReloadInterval)) logger.Info("Enabled automatic configuration file reloading. Checking for configuration changes every", "interval", c.autoReloadInterval)
case "auto-gomemlimit": case "auto-gomemlimit":
c.enableAutoGOMEMLIMIT = true c.enableAutoGOMEMLIMIT = true
level.Info(logger).Log("msg", "Automatically set GOMEMLIMIT to match Linux container or system memory limit") logger.Info("Automatically set GOMEMLIMIT to match Linux container or system memory limit")
case "concurrent-rule-eval": case "concurrent-rule-eval":
c.enableConcurrentRuleEval = true c.enableConcurrentRuleEval = true
level.Info(logger).Log("msg", "Experimental concurrent rule evaluation enabled.") logger.Info("Experimental concurrent rule evaluation enabled.")
case "promql-experimental-functions": case "promql-experimental-functions":
parser.EnableExperimentalFunctions = true parser.EnableExperimentalFunctions = true
level.Info(logger).Log("msg", "Experimental PromQL functions enabled.") logger.Info("Experimental PromQL functions enabled.")
case "native-histograms": case "native-histograms":
c.tsdb.EnableNativeHistograms = true c.tsdb.EnableNativeHistograms = true
c.scrape.EnableNativeHistogramsIngestion = true c.scrape.EnableNativeHistogramsIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
level.Info(logger).Log("msg", "Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) logger.Info("Experimental native histogram support enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "ooo-native-histograms": case "ooo-native-histograms":
c.tsdb.EnableOOONativeHistograms = true c.tsdb.EnableOOONativeHistograms = true
level.Info(logger).Log("msg", "Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true") logger.Info("Experimental out-of-order native histogram ingestion enabled. This will only take effect if OutOfOrderTimeWindow is > 0 and if EnableNativeHistograms = true")
case "created-timestamp-zero-ingestion": case "created-timestamp-zero-ingestion":
c.scrape.EnableCreatedTimestampZeroIngestion = true c.scrape.EnableCreatedTimestampZeroIngestion = true
// Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers. // Change relevant global variables. Hacky, but it's hard to pass a new option or default to unmarshallers.
config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultConfig.GlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols config.DefaultGlobalConfig.ScrapeProtocols = config.DefaultProtoFirstScrapeProtocols
level.Info(logger).Log("msg", "Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols)) logger.Info("Experimental created timestamp zero ingestion enabled. Changed default scrape_protocols to prefer PrometheusProto format.", "global.scrape_protocols", fmt.Sprintf("%v", config.DefaultGlobalConfig.ScrapeProtocols))
case "delayed-compaction": case "delayed-compaction":
c.tsdb.EnableDelayedCompaction = true c.tsdb.EnableDelayedCompaction = true
level.Info(logger).Log("msg", "Experimental delayed compaction is enabled.") logger.Info("Experimental delayed compaction is enabled.")
case "promql-delayed-name-removal": case "promql-delayed-name-removal":
c.promqlEnableDelayedNameRemoval = true c.promqlEnableDelayedNameRemoval = true
level.Info(logger).Log("msg", "Experimental PromQL delayed name removal enabled.") logger.Info("Experimental PromQL delayed name removal enabled.")
case "": case "":
continue continue
case "old-ui": case "old-ui":
c.web.UseOldUI = true c.web.UseOldUI = true
level.Info(logger).Log("msg", "Serving previous version of the Prometheus web UI.") logger.Info("Serving previous version of the Prometheus web UI.")
default: default:
level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o) logger.Warn("Unknown option for --enable-feature", "option", o)
} }
} }
} }
@ -280,7 +318,7 @@ func main() {
Registerer: prometheus.DefaultRegisterer, Registerer: prometheus.DefaultRegisterer,
Gatherer: prometheus.DefaultGatherer, Gatherer: prometheus.DefaultGatherer,
}, },
promlogConfig: promlog.Config{}, promslogConfig: promslog.Config{},
} }
a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout) a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)
@ -483,7 +521,7 @@ func main() {
a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode) a.Flag("agent", "Run Prometheus in 'Agent mode'.").BoolVar(&agentMode)
promlogflag.AddFlags(a, &cfg.promlogConfig) promslogflag.AddFlags(a, &cfg.promslogConfig)
a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error { a.Flag("write-documentation", "Generate command line documentation. Internal use.").Hidden().Action(func(ctx *kingpin.ParseContext) error {
if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil { if err := documentcli.GenerateMarkdown(a.Model(), os.Stdout); err != nil {
@ -501,7 +539,8 @@ func main() {
os.Exit(2) os.Exit(2)
} }
logger := promlog.New(&cfg.promlogConfig) logger := promslog.New(&cfg.promslogConfig)
slog.SetDefault(logger)
notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer) notifs := notifications.NewNotifications(cfg.maxNotificationsSubscribers, prometheus.DefaultRegisterer)
cfg.web.NotificationsSub = notifs.Sub cfg.web.NotificationsSub = notifs.Sub
@ -556,12 +595,12 @@ func main() {
// Throw error for invalid config before starting other components. // Throw error for invalid config before starting other components.
var cfgFile *config.Config var cfgFile *config.Config
if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil { if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, promslog.NewNopLogger()); err != nil {
absPath, pathErr := filepath.Abs(cfg.configFile) absPath, pathErr := filepath.Abs(cfg.configFile)
if pathErr != nil { if pathErr != nil {
absPath = cfg.configFile absPath = cfg.configFile
} }
level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err) logger.Error(fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "file", absPath, "err", err)
os.Exit(2) os.Exit(2)
} }
if _, err := cfgFile.GetScrapeConfigs(); err != nil { if _, err := cfgFile.GetScrapeConfigs(); err != nil {
@ -569,7 +608,7 @@ func main() {
if pathErr != nil { if pathErr != nil {
absPath = cfg.configFile absPath = cfg.configFile
} }
level.Error(logger).Log("msg", fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err) logger.Error(fmt.Sprintf("Error loading scrape config files from config (--config.file=%q)", cfg.configFile), "file", absPath, "err", err)
os.Exit(2) os.Exit(2)
} }
if cfg.tsdb.EnableExemplarStorage { if cfg.tsdb.EnableExemplarStorage {
@ -602,7 +641,7 @@ func main() {
if !agentMode { if !agentMode {
if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 { if cfg.tsdb.RetentionDuration == 0 && cfg.tsdb.MaxBytes == 0 {
cfg.tsdb.RetentionDuration = defaultRetentionDuration cfg.tsdb.RetentionDuration = defaultRetentionDuration
level.Info(logger).Log("msg", "No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration) logger.Info("No time or size retention was set so using the default time retention", "duration", defaultRetentionDuration)
} }
// Check for overflows. This limits our max retention to 100y. // Check for overflows. This limits our max retention to 100y.
@ -612,7 +651,7 @@ func main() {
panic(err) panic(err)
} }
cfg.tsdb.RetentionDuration = y cfg.tsdb.RetentionDuration = y
level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String()) logger.Warn("Time retention value is too high. Limiting to: " + y.String())
} }
// Max block size settings. // Max block size settings.
@ -633,11 +672,8 @@ func main() {
noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{} noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval) noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval)
// Above level 6, the k8s client would log bearer tokens in clear-text. klogv2.SetSlogLogger(logger.With("component", "k8s_client_runtime"))
klog.ClampLevel(6) klog.SetOutputBySeverity("INFO", klogv1Writer{})
klog.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
klogv2.ClampLevel(6)
klogv2.SetLogger(log.With(logger, "component", "k8s_client_runtime"))
modeAppName := "Prometheus Server" modeAppName := "Prometheus Server"
mode := "server" mode := "server"
@ -646,20 +682,22 @@ func main() {
mode = "agent" mode = "agent"
} }
level.Info(logger).Log("msg", "Starting "+modeAppName, "mode", mode, "version", version.Info()) logger.Info("Starting "+modeAppName, "mode", mode, "version", version.Info())
if bits.UintSize < 64 { if bits.UintSize < 64 {
level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH) logger.Warn("This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
} }
level.Info(logger).Log("build_context", version.BuildContext()) logger.Info("operational information",
level.Info(logger).Log("host_details", prom_runtime.Uname()) "build_context", version.BuildContext(),
level.Info(logger).Log("fd_limits", prom_runtime.FdLimits()) "host_details", prom_runtime.Uname(),
level.Info(logger).Log("vm_limits", prom_runtime.VMLimits()) "fd_limits", prom_runtime.FdLimits(),
"vm_limits", prom_runtime.VMLimits(),
)
var ( var (
localStorage = &readyStorage{stats: tsdb.NewDBStats()} localStorage = &readyStorage{stats: tsdb.NewDBStats()}
scraper = &readyScrapeManager{} scraper = &readyScrapeManager{}
remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata) remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper, cfg.scrape.AppendMetadata)
fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage) fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
) )
@ -667,7 +705,7 @@ func main() {
ctxWeb, cancelWeb = context.WithCancel(context.Background()) ctxWeb, cancelWeb = context.WithCancel(context.Background())
ctxRule = context.Background() ctxRule = context.Background()
notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier"))
ctxScrape, cancelScrape = context.WithCancel(context.Background()) ctxScrape, cancelScrape = context.WithCancel(context.Background())
ctxNotify, cancelNotify = context.WithCancel(context.Background()) ctxNotify, cancelNotify = context.WithCancel(context.Background())
@ -682,37 +720,37 @@ func main() {
// they are not specific to an SD instance. // they are not specific to an SD instance.
err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer) err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "failed to register Kubernetes client metrics", "err", err) logger.Error("failed to register Kubernetes client metrics", "err", err)
os.Exit(1) os.Exit(1)
} }
sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer) sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) logger.Error("failed to register service discovery metrics", "err", err)
os.Exit(1) os.Exit(1)
} }
discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape")) discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
if discoveryManagerScrape == nil { if discoveryManagerScrape == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager scrape") logger.Error("failed to create a discovery manager scrape")
os.Exit(1) os.Exit(1)
} }
discoveryManagerNotify = discovery.NewManager(ctxNotify, log.With(logger, "component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify")) discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
if discoveryManagerNotify == nil { if discoveryManagerNotify == nil {
level.Error(logger).Log("msg", "failed to create a discovery manager notify") logger.Error("failed to create a discovery manager notify")
os.Exit(1) os.Exit(1)
} }
scrapeManager, err := scrape.NewManager( scrapeManager, err := scrape.NewManager(
&cfg.scrape, &cfg.scrape,
log.With(logger, "component", "scrape manager"), logger.With("component", "scrape manager"),
func(s string) (log.Logger, error) { return logging.NewJSONFileLogger(s) }, logging.NewJSONFileLogger,
fanoutStorage, fanoutStorage,
prometheus.DefaultRegisterer, prometheus.DefaultRegisterer,
) )
if err != nil { if err != nil {
level.Error(logger).Log("msg", "failed to create a scrape manager", "err", err) logger.Error("failed to create a scrape manager", "err", err)
os.Exit(1) os.Exit(1)
} }
@ -725,10 +763,10 @@ func main() {
if cfg.enableAutoGOMAXPROCS { if cfg.enableAutoGOMAXPROCS {
l := func(format string, a ...interface{}) { l := func(format string, a ...interface{}) {
level.Info(logger).Log("component", "automaxprocs", "msg", fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...)) logger.Info(fmt.Sprintf(strings.TrimPrefix(format, "maxprocs: "), a...), "component", "automaxprocs")
} }
if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil { if _, err := maxprocs.Set(maxprocs.Logger(l)); err != nil {
level.Warn(logger).Log("component", "automaxprocs", "msg", "Failed to set GOMAXPROCS automatically", "err", err) logger.Warn("Failed to set GOMAXPROCS automatically", "component", "automaxprocs", "err", err)
} }
} }
@ -742,17 +780,17 @@ func main() {
), ),
), ),
); err != nil { ); err != nil {
level.Warn(logger).Log("component", "automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err) logger.Warn("automemlimit", "msg", "Failed to set GOMEMLIMIT automatically", "err", err)
} }
} }
if !agentMode { if !agentMode {
opts := promql.EngineOpts{ opts := promql.EngineOpts{
Logger: log.With(logger, "component", "query engine"), Logger: logger.With("component", "query engine"),
Reg: prometheus.DefaultRegisterer, Reg: prometheus.DefaultRegisterer,
MaxSamples: cfg.queryMaxSamples, MaxSamples: cfg.queryMaxSamples,
Timeout: time.Duration(cfg.queryTimeout), Timeout: time.Duration(cfg.queryTimeout),
ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")), ActiveQueryTracker: promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")),
LookbackDelta: time.Duration(cfg.lookbackDelta), LookbackDelta: time.Duration(cfg.lookbackDelta),
NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get, NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
// EnableAtModifier and EnableNegativeOffset have to be // EnableAtModifier and EnableNegativeOffset have to be
@ -773,7 +811,7 @@ func main() {
Context: ctxRule, Context: ctxRule,
ExternalURL: cfg.web.ExternalURL, ExternalURL: cfg.web.ExternalURL,
Registerer: prometheus.DefaultRegisterer, Registerer: prometheus.DefaultRegisterer,
Logger: log.With(logger, "component", "rule manager"), Logger: logger.With("component", "rule manager"),
OutageTolerance: time.Duration(cfg.outageTolerance), OutageTolerance: time.Duration(cfg.outageTolerance),
ForGracePeriod: time.Duration(cfg.forGracePeriod), ForGracePeriod: time.Duration(cfg.forGracePeriod),
ResendDelay: time.Duration(cfg.resendDelay), ResendDelay: time.Duration(cfg.resendDelay),
@ -824,7 +862,7 @@ func main() {
} }
// Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager. // Depends on cfg.web.ScrapeManager so needs to be after cfg.web.ScrapeManager = scrapeManager.
webHandler := web.New(log.With(logger, "component", "web"), &cfg.web) webHandler := web.New(logger.With("component", "web"), &cfg.web)
// Monitor outgoing connections on default transport with conntrack. // Monitor outgoing connections on default transport with conntrack.
http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc( http.DefaultTransport.(*http.Transport).DialContext = conntrack.NewDialContextFunc(
@ -951,18 +989,18 @@ func main() {
listeners, err := webHandler.Listeners() listeners, err := webHandler.Listeners()
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Unable to start web listeners", "err", err) logger.Error("Unable to start web listener", "err", err)
if err := queryEngine.Close(); err != nil { if err := queryEngine.Close(); err != nil {
level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) logger.Warn("Closing query engine failed", "err", err)
} }
os.Exit(1) os.Exit(1)
} }
err = toolkit_web.Validate(*webConfig) err = toolkit_web.Validate(*webConfig)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Unable to validate web configuration file", "err", err) logger.Error("Unable to validate web configuration file", "err", err)
if err := queryEngine.Close(); err != nil { if err := queryEngine.Close(); err != nil {
level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) logger.Warn("Closing query engine failed", "err", err)
} }
os.Exit(1) os.Exit(1)
} }
@ -978,15 +1016,15 @@ func main() {
// Don't forget to release the reloadReady channel so that waiting blocks can exit normally. // Don't forget to release the reloadReady channel so that waiting blocks can exit normally.
select { select {
case sig := <-term: case sig := <-term:
level.Warn(logger).Log("msg", "Received an OS signal, exiting gracefully...", "signal", sig.String()) logger.Warn("Received an OS signal, exiting gracefully...", "signal", sig.String())
reloadReady.Close() reloadReady.Close()
case <-webHandler.Quit(): case <-webHandler.Quit():
level.Warn(logger).Log("msg", "Received termination request via web service, exiting gracefully...") logger.Warn("Received termination request via web service, exiting gracefully...")
case <-cancel: case <-cancel:
reloadReady.Close() reloadReady.Close()
} }
if err := queryEngine.Close(); err != nil { if err := queryEngine.Close(); err != nil {
level.Warn(logger).Log("msg", "Closing query engine failed", "err", err) logger.Warn("Closing query engine failed", "err", err)
} }
return nil return nil
}, },
@ -1002,11 +1040,11 @@ func main() {
g.Add( g.Add(
func() error { func() error {
err := discoveryManagerScrape.Run() err := discoveryManagerScrape.Run()
level.Info(logger).Log("msg", "Scrape discovery manager stopped") logger.Info("Scrape discovery manager stopped")
return err return err
}, },
func(err error) { func(err error) {
level.Info(logger).Log("msg", "Stopping scrape discovery manager...") logger.Info("Stopping scrape discovery manager...")
cancelScrape() cancelScrape()
}, },
) )
@ -1016,11 +1054,11 @@ func main() {
g.Add( g.Add(
func() error { func() error {
err := discoveryManagerNotify.Run() err := discoveryManagerNotify.Run()
level.Info(logger).Log("msg", "Notify discovery manager stopped") logger.Info("Notify discovery manager stopped")
return err return err
}, },
func(err error) { func(err error) {
level.Info(logger).Log("msg", "Stopping notify discovery manager...") logger.Info("Stopping notify discovery manager...")
cancelNotify() cancelNotify()
}, },
) )
@ -1049,7 +1087,7 @@ func main() {
<-reloadReady.C <-reloadReady.C
err := scrapeManager.Run(discoveryManagerScrape.SyncCh()) err := scrapeManager.Run(discoveryManagerScrape.SyncCh())
level.Info(logger).Log("msg", "Scrape manager stopped") logger.Info("Scrape manager stopped")
return err return err
}, },
func(err error) { func(err error) {
@ -1057,7 +1095,7 @@ func main() {
// so that it doesn't try to write samples to a closed storage. // so that it doesn't try to write samples to a closed storage.
// We should also wait for rule manager to be fully stopped to ensure // We should also wait for rule manager to be fully stopped to ensure
// we don't trigger any false positive alerts for rules using absent(). // we don't trigger any false positive alerts for rules using absent().
level.Info(logger).Log("msg", "Stopping scrape manager...") logger.Info("Stopping scrape manager...")
scrapeManager.Stop() scrapeManager.Stop()
}, },
) )
@ -1088,7 +1126,7 @@ func main() {
if cfg.enableAutoReload { if cfg.enableAutoReload {
checksum, err = config.GenerateChecksum(cfg.configFile) checksum, err = config.GenerateChecksum(cfg.configFile)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to generate initial checksum for configuration file", "err", err) logger.Error("Failed to generate initial checksum for configuration file", "err", err)
} }
} }
@ -1108,17 +1146,17 @@ func main() {
select { select {
case <-hup: case <-hup:
if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
level.Error(logger).Log("msg", "Error reloading config", "err", err) logger.Error("Error reloading config", "err", err)
} else if cfg.enableAutoReload { } else if cfg.enableAutoReload {
if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
checksum = currentChecksum checksum = currentChecksum
} else { } else {
level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) logger.Error("Failed to generate checksum during configuration reload", "err", err)
} }
} }
case rc := <-webHandler.Reload(): case rc := <-webHandler.Reload():
if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
level.Error(logger).Log("msg", "Error reloading config", "err", err) logger.Error("Error reloading config", "err", err)
rc <- err rc <- err
} else { } else {
rc <- nil rc <- nil
@ -1126,7 +1164,7 @@ func main() {
if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil { if currentChecksum, err := config.GenerateChecksum(cfg.configFile); err == nil {
checksum = currentChecksum checksum = currentChecksum
} else { } else {
level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) logger.Error("Failed to generate checksum during configuration reload", "err", err)
} }
} }
} }
@ -1136,14 +1174,14 @@ func main() {
} }
currentChecksum, err := config.GenerateChecksum(cfg.configFile) currentChecksum, err := config.GenerateChecksum(cfg.configFile)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to generate checksum during configuration reload", "err", err) logger.Error("Failed to generate checksum during configuration reload", "err", err)
} else if currentChecksum == checksum { } else if currentChecksum == checksum {
continue continue
} }
level.Info(logger).Log("msg", "Configuration file change detected, reloading the configuration.") logger.Info("Configuration file change detected, reloading the configuration.")
if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil { if err := reloadConfig(cfg.configFile, cfg.enableExpandExternalLabels, cfg.tsdb.EnableExemplarStorage, logger, noStepSubqueryInterval, callback, reloaders...); err != nil {
level.Error(logger).Log("msg", "Error reloading config", "err", err) logger.Error("Error reloading config", "err", err)
} else { } else {
checksum = currentChecksum checksum = currentChecksum
} }
@ -1180,7 +1218,7 @@ func main() {
webHandler.SetReady(web.Ready) webHandler.SetReady(web.Ready)
notifs.DeleteNotification(notifications.StartingUp) notifs.DeleteNotification(notifications.StartingUp)
level.Info(logger).Log("msg", "Server is ready to receive web requests.") logger.Info("Server is ready to receive web requests.")
<-cancel <-cancel
return nil return nil
}, },
@ -1195,7 +1233,7 @@ func main() {
cancel := make(chan struct{}) cancel := make(chan struct{})
g.Add( g.Add(
func() error { func() error {
level.Info(logger).Log("msg", "Starting TSDB ...") logger.Info("Starting TSDB ...")
if cfg.tsdb.WALSegmentSize != 0 { if cfg.tsdb.WALSegmentSize != 0 {
if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 { if cfg.tsdb.WALSegmentSize < 10*1024*1024 || cfg.tsdb.WALSegmentSize > 256*1024*1024 {
return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB") return errors.New("flag 'storage.tsdb.wal-segment-size' must be set between 10MB and 256MB")
@ -1214,13 +1252,13 @@ func main() {
switch fsType := prom_runtime.Statfs(localStoragePath); fsType { switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
case "NFS_SUPER_MAGIC": case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") logger.Warn("This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.", "fs_type", fsType)
default: default:
level.Info(logger).Log("fs_type", fsType) logger.Info("filesystem information", "fs_type", fsType)
} }
level.Info(logger).Log("msg", "TSDB started") logger.Info("TSDB started")
level.Debug(logger).Log("msg", "TSDB options", logger.Debug("TSDB options",
"MinBlockDuration", cfg.tsdb.MinBlockDuration, "MinBlockDuration", cfg.tsdb.MinBlockDuration,
"MaxBlockDuration", cfg.tsdb.MaxBlockDuration, "MaxBlockDuration", cfg.tsdb.MaxBlockDuration,
"MaxBytes", cfg.tsdb.MaxBytes, "MaxBytes", cfg.tsdb.MaxBytes,
@ -1239,7 +1277,7 @@ func main() {
}, },
func(err error) { func(err error) {
if err := fanoutStorage.Close(); err != nil { if err := fanoutStorage.Close(); err != nil {
level.Error(logger).Log("msg", "Error stopping storage", "err", err) logger.Error("Error stopping storage", "err", err)
} }
close(cancel) close(cancel)
}, },
@ -1251,7 +1289,7 @@ func main() {
cancel := make(chan struct{}) cancel := make(chan struct{})
g.Add( g.Add(
func() error { func() error {
level.Info(logger).Log("msg", "Starting WAL storage ...") logger.Info("Starting WAL storage ...")
if cfg.agent.WALSegmentSize != 0 { if cfg.agent.WALSegmentSize != 0 {
if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 { if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 {
return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB") return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB")
@ -1270,13 +1308,13 @@ func main() {
switch fsType := prom_runtime.Statfs(localStoragePath); fsType { switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
case "NFS_SUPER_MAGIC": case "NFS_SUPER_MAGIC":
level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.") logger.Warn(fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
default: default:
level.Info(logger).Log("fs_type", fsType) logger.Info(fsType)
} }
level.Info(logger).Log("msg", "Agent WAL storage started") logger.Info("Agent WAL storage started")
level.Debug(logger).Log("msg", "Agent WAL storage options", logger.Debug("Agent WAL storage options",
"WALSegmentSize", cfg.agent.WALSegmentSize, "WALSegmentSize", cfg.agent.WALSegmentSize,
"WALCompression", cfg.agent.WALCompression, "WALCompression", cfg.agent.WALCompression,
"StripeSize", cfg.agent.StripeSize, "StripeSize", cfg.agent.StripeSize,
@ -1294,7 +1332,7 @@ func main() {
}, },
func(e error) { func(e error) {
if err := fanoutStorage.Close(); err != nil { if err := fanoutStorage.Close(); err != nil {
level.Error(logger).Log("msg", "Error stopping storage", "err", err) logger.Error("Error stopping storage", "err", err)
} }
close(cancel) close(cancel)
}, },
@ -1328,7 +1366,7 @@ func main() {
<-reloadReady.C <-reloadReady.C
notifierManager.Run(discoveryManagerNotify.SyncCh()) notifierManager.Run(discoveryManagerNotify.SyncCh())
level.Info(logger).Log("msg", "Notifier manager stopped") logger.Info("Notifier manager stopped")
return nil return nil
}, },
func(err error) { func(err error) {
@ -1337,16 +1375,16 @@ func main() {
) )
} }
if err := g.Run(); err != nil { if err := g.Run(); err != nil {
level.Error(logger).Log("err", err) logger.Error("Error running goroutines from run.Group", "err", err)
os.Exit(1) os.Exit(1)
} }
level.Info(logger).Log("msg", "See you next time!") logger.Info("See you next time!")
} }
func openDBWithMetrics(dir string, logger log.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) { func openDBWithMetrics(dir string, logger *slog.Logger, reg prometheus.Registerer, opts *tsdb.Options, stats *tsdb.DBStats) (*tsdb.DB, error) {
db, err := tsdb.Open( db, err := tsdb.Open(
dir, dir,
log.With(logger, "component", "tsdb"), logger.With("component", "tsdb"),
reg, reg,
opts, opts,
stats, stats,
@ -1399,10 +1437,10 @@ type reloader struct {
reloader func(*config.Config) error reloader func(*config.Config) error
} }
func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) { func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger *slog.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, callback func(bool), rls ...reloader) (err error) {
start := time.Now() start := time.Now()
timings := []interface{}{} timingsLogger := logger
level.Info(logger).Log("msg", "Loading configuration file", "filename", filename) logger.Info("Loading configuration file", "filename", filename)
defer func() { defer func() {
if err == nil { if err == nil {
@ -1430,10 +1468,10 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
for _, rl := range rls { for _, rl := range rls {
rstart := time.Now() rstart := time.Now()
if err := rl.reloader(conf); err != nil { if err := rl.reloader(conf); err != nil {
level.Error(logger).Log("msg", "Failed to apply configuration", "err", err) logger.Error("Failed to apply configuration", "err", err)
failed = true failed = true
} }
timings = append(timings, rl.name, time.Since(rstart)) timingsLogger = timingsLogger.With((rl.name), time.Since(rstart))
} }
if failed { if failed {
return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename) return fmt.Errorf("one or more errors occurred while applying the new configuration (--config.file=%q)", filename)
@ -1441,7 +1479,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC) oldGoGC := debug.SetGCPercent(conf.Runtime.GoGC)
if oldGoGC != conf.Runtime.GoGC { if oldGoGC != conf.Runtime.GoGC {
level.Info(logger).Log("msg", "updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC) logger.Info("updated GOGC", "old", oldGoGC, "new", conf.Runtime.GoGC)
} }
// Write the new setting out to the ENV var for runtime API output. // Write the new setting out to the ENV var for runtime API output.
if conf.Runtime.GoGC >= 0 { if conf.Runtime.GoGC >= 0 {
@ -1451,8 +1489,7 @@ func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage b
} }
noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval) noStepSuqueryInterval.Set(conf.GlobalConfig.EvaluationInterval)
l := []interface{}{"msg", "Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start)} timingsLogger.Info("Completed loading of configuration file", "filename", filename, "totalDuration", time.Since(start))
level.Info(logger).Log(append(l, timings...)...)
return nil return nil
} }


@ -31,9 +31,9 @@ import (
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
@ -295,7 +295,7 @@ func TestTimeMetrics(t *testing.T) {
tmpDir := t.TempDir() tmpDir := t.TempDir()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil) db, err := openDBWithMetrics(tmpDir, promslog.NewNopLogger(), reg, nil, nil)
require.NoError(t, err) require.NoError(t, err)
defer func() { defer func() {
require.NoError(t, db.Close()) require.NoError(t, db.Close())


@ -442,6 +442,7 @@ func readQueryLog(t *testing.T, path string) []queryLogLine {
file, err := os.Open(path) file, err := os.Open(path)
require.NoError(t, err) require.NoError(t, err)
defer file.Close() defer file.Close()
scanner := bufio.NewScanner(file) scanner := bufio.NewScanner(file)
for scanner.Scan() { for scanner.Scan() {
var q queryLogLine var q queryLogLine


@ -21,9 +21,10 @@ import (
"math" "math"
"time" "time"
"github.com/go-kit/log"
"github.com/oklog/ulid" "github.com/oklog/ulid"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/textparse" "github.com/prometheus/prometheus/model/textparse"
"github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb"
@ -120,7 +121,7 @@ func createBlocks(input []byte, mint, maxt, maxBlockDuration int64, maxSamplesIn
// also need to append samples throughout the whole block range. To allow that, we // also need to append samples throughout the whole block range. To allow that, we
// pretend that the block is twice as large here, but only really add sample in the // pretend that the block is twice as large here, but only really add sample in the
// original interval later. // original interval later.
w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, 2*blockDuration) w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, 2*blockDuration)
if err != nil { if err != nil {
return fmt.Errorf("block writer: %w", err) return fmt.Errorf("block writer: %w", err)
} }


@ -32,13 +32,13 @@ import (
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/google/pprof/profile" "github.com/google/pprof/profile"
"github.com/prometheus/client_golang/api" "github.com/prometheus/client_golang/api"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil/promlint" "github.com/prometheus/client_golang/prometheus/testutil/promlint"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/exporter-toolkit/web"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -575,7 +575,7 @@ func checkFileExists(fn string) error {
func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) { func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
fmt.Println("Checking", filename) fmt.Println("Checking", filename)
cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger()) cfg, err := config.LoadFile(filename, agentMode, false, promslog.NewNopLogger())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1182,7 +1182,7 @@ func importRules(url *url.URL, roundTripper http.RoundTripper, start, end, outpu
return fmt.Errorf("new api client error: %w", err) return fmt.Errorf("new api client error: %w", err)
} }
ruleImporter := newRuleImporter(log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), cfg, api) ruleImporter := newRuleImporter(promslog.New(&promslog.Config{}), cfg, api)
errs := ruleImporter.loadGroups(ctx, files) errs := ruleImporter.loadGroups(ctx, files)
for _, err := range errs { for _, err := range errs {
if err != nil { if err != nil {


@ -16,12 +16,12 @@ package main
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
v1 "github.com/prometheus/client_golang/api/prometheus/v1" v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/timestamp"
@ -38,7 +38,7 @@ type queryRangeAPI interface {
} }
type ruleImporter struct { type ruleImporter struct {
logger log.Logger logger *slog.Logger
config ruleImporterConfig config ruleImporterConfig
apiClient queryRangeAPI apiClient queryRangeAPI
@ -57,8 +57,8 @@ type ruleImporterConfig struct {
// newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series // newRuleImporter creates a new rule importer that can be used to parse and evaluate recording rule files and create new series
// written to disk in blocks. // written to disk in blocks.
func newRuleImporter(logger log.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter { func newRuleImporter(logger *slog.Logger, config ruleImporterConfig, apiClient queryRangeAPI) *ruleImporter {
level.Info(logger).Log("backfiller", "new rule importer", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822)) logger.Info("new rule importer", "component", "backfiller", "start", config.start.Format(time.RFC822), "end", config.end.Format(time.RFC822))
return &ruleImporter{ return &ruleImporter{
logger: logger, logger: logger,
config: config, config: config,
@ -80,10 +80,10 @@ func (importer *ruleImporter) loadGroups(_ context.Context, filenames []string)
// importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks. // importAll evaluates all the recording rules and creates new time series and writes them to disk in blocks.
func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) { func (importer *ruleImporter) importAll(ctx context.Context) (errs []error) {
for name, group := range importer.groups { for name, group := range importer.groups {
level.Info(importer.logger).Log("backfiller", "processing group", "name", name) importer.logger.Info("processing group", "component", "backfiller", "name", name)
for i, r := range group.Rules() { for i, r := range group.Rules() {
level.Info(importer.logger).Log("backfiller", "processing rule", "id", i, "name", r.Name()) importer.logger.Info("processing rule", "component", "backfiller", "id", i, "name", r.Name())
if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil { if err := importer.importRule(ctx, r.Query().String(), r.Name(), r.Labels(), importer.config.start, importer.config.end, int64(importer.config.maxBlockDuration/time.Millisecond), group); err != nil {
errs = append(errs, err) errs = append(errs, err)
} }
@ -124,7 +124,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
return fmt.Errorf("query range: %w", err) return fmt.Errorf("query range: %w", err)
} }
if warnings != nil { if warnings != nil {
level.Warn(importer.logger).Log("msg", "Range query returned warnings.", "warnings", warnings) importer.logger.Warn("Range query returned warnings.", "warnings", warnings)
} }
// To prevent races with compaction, a block writer only allows appending samples // To prevent races with compaction, a block writer only allows appending samples
@ -133,7 +133,7 @@ func (importer *ruleImporter) importRule(ctx context.Context, ruleExpr, ruleName
// also need to append samples throughout the whole block range. To allow that, we // also need to append samples throughout the whole block range. To allow that, we
// pretend that the block is twice as large here, but only really add sample in the // pretend that the block is twice as large here, but only really add sample in the
// original interval later. // original interval later.
w, err := tsdb.NewBlockWriter(log.NewNopLogger(), importer.config.outputDir, 2*blockDuration) w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), importer.config.outputDir, 2*blockDuration)
if err != nil { if err != nil {
return fmt.Errorf("new block writer: %w", err) return fmt.Errorf("new block writer: %w", err)
} }


@ -21,9 +21,9 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
v1 "github.com/prometheus/client_golang/api/prometheus/v1" v1 "github.com/prometheus/client_golang/api/prometheus/v1"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -161,7 +161,7 @@ func TestBackfillRuleIntegration(t *testing.T) {
} }
func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) { func newTestRuleImporter(_ context.Context, start time.Time, tmpDir string, testSamples model.Matrix, maxBlockDuration time.Duration) (*ruleImporter, error) {
logger := log.NewNopLogger() logger := promslog.NewNopLogger()
cfg := ruleImporterConfig{ cfg := ruleImporterConfig{
outputDir: tmpDir, outputDir: tmpDir,
start: start.Add(-10 * time.Hour), start: start.Add(-10 * time.Hour),


@ -20,9 +20,9 @@ import (
"os" "os"
"time" "time"
"github.com/go-kit/log"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -39,7 +39,7 @@ type sdCheckResult struct {
// CheckSD performs service discovery for the given job name and reports the results. // CheckSD performs service discovery for the given job name and reports the results.
func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int { func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration, registerer prometheus.Registerer) int {
logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) logger := promslog.New(&promslog.Config{})
cfg, err := config.LoadFile(sdConfigFiles, false, false, logger) cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
if err != nil { if err != nil {


@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
@ -32,9 +33,10 @@ import (
"time" "time"
"github.com/alecthomas/units" "github.com/alecthomas/units"
"github.com/go-kit/log"
"go.uber.org/atomic" "go.uber.org/atomic"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage"
@ -60,7 +62,7 @@ type writeBenchmark struct {
memprof *os.File memprof *os.File
blockprof *os.File blockprof *os.File
mtxprof *os.File mtxprof *os.File
logger log.Logger logger *slog.Logger
} }
func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error { func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) error {
@ -68,7 +70,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
outPath: outPath, outPath: outPath,
samplesFile: samplesFile, samplesFile: samplesFile,
numMetrics: numMetrics, numMetrics: numMetrics,
logger: log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)), logger: promslog.New(&promslog.Config{}),
} }
if b.outPath == "" { if b.outPath == "" {
dir, err := os.MkdirTemp("", "tsdb_bench") dir, err := os.MkdirTemp("", "tsdb_bench")
@ -87,9 +89,7 @@ func benchmarkWrite(outPath, samplesFile string, numMetrics, numScrapes int) err
dir := filepath.Join(b.outPath, "storage") dir := filepath.Join(b.outPath, "storage")
l := log.With(b.logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) st, err := tsdb.Open(dir, b.logger, nil, &tsdb.Options{
st, err := tsdb.Open(dir, l, nil, &tsdb.Options{
RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond), RetentionDuration: int64(15 * 24 * time.Hour / time.Millisecond),
MinBlockDuration: int64(2 * time.Hour / time.Millisecond), MinBlockDuration: int64(2 * time.Hour / time.Millisecond),
}, tsdb.NewDBStats()) }, tsdb.NewDBStats())

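The benchmark also drops the manual decoration go-kit needed (log.With with "ts" and "caller" keys): the slog handler attaches the record time itself, and source location when enabled, so the undecorated logger goes straight to tsdb.Open. A rough sketch of the difference, with illustrative values:

package example

import "github.com/prometheus/common/promslog"

func benchExample() {
    logger := promslog.New(&promslog.Config{})
    // go-kit: l := log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
    // slog: the handler stamps each record, so no wrapper is required.
    logger.Info("opening benchmark storage", "dir", "/tmp/tsdb_bench")
}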

@ -26,13 +26,13 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/nsf/jsondiff" "github.com/nsf/jsondiff"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -218,7 +218,7 @@ func (tg *testGroup) test(evalInterval time.Duration, groupOrderMap map[string]i
Appendable: suite.Storage(), Appendable: suite.Storage(),
Context: context.Background(), Context: context.Background(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {}, NotifyFunc: func(ctx context.Context, expr string, alerts ...*rules.Alert) {},
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
m := rules.NewManager(opts) m := rules.NewManager(opts)
groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...) groupsMap, ers := m.LoadGroups(time.Duration(tg.Interval), tg.ExternalLabels, tg.ExternalURL, nil, ruleFiles...)


@ -16,6 +16,7 @@ package config
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/url" "net/url"
"os" "os"
"path/filepath" "path/filepath"
@ -25,8 +26,6 @@ import (
"time" "time"
"github.com/alecthomas/units" "github.com/alecthomas/units"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -73,7 +72,7 @@ const (
) )
// Load parses the YAML input s into a Config. // Load parses the YAML input s into a Config.
func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { func Load(s string, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
cfg := &Config{} cfg := &Config{}
// If the entire config body is empty the UnmarshalYAML method is // If the entire config body is empty the UnmarshalYAML method is
// never called. We thus have to set the DefaultConfig at the entry // never called. We thus have to set the DefaultConfig at the entry
@ -98,11 +97,11 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
if v := os.Getenv(s); v != "" { if v := os.Getenv(s); v != "" {
return v return v
} }
level.Warn(logger).Log("msg", "Empty environment variable", "name", s) logger.Warn("Empty environment variable", "name", s)
return "" return ""
}) })
if newV != v.Value { if newV != v.Value {
level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV)
} }
// Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024 // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024
b.Add(v.Name, newV) b.Add(v.Name, newV)
@ -112,7 +111,7 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro
} }
// LoadFile parses the given YAML file into a Config. // LoadFile parses the given YAML file into a Config.
func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { func LoadFile(filename string, agentMode, expandExternalLabels bool, logger *slog.Logger) (*Config, error) {
content, err := os.ReadFile(filename) content, err := os.ReadFile(filename)
if err != nil { if err != nil {
return nil, err return nil, err

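The config loader shows the two call-site conversions applied throughout this change: level.Warn(logger).Log("msg", ...) collapses into logger.Warn(...), and functions that might be handed a nil logger fall back to a no-op one. A minimal sketch under those assumptions; expandEnv is an illustrative stand-in, not the real loader:

package example

import (
    "log/slog"
    "os"

    "github.com/prometheus/common/promslog"
)

// expandEnv resolves an environment variable, warning when it is empty,
// in the same shape Load uses after the slog conversion.
func expandEnv(name string, logger *slog.Logger) string {
    if logger == nil {
        logger = promslog.NewNopLogger()
    }
    v := os.Getenv(name)
    if v == "" {
        // go-kit: level.Warn(logger).Log("msg", "Empty environment variable", "name", name)
        logger.Warn("Empty environment variable", "name", name)
    }
    return v
}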

@ -24,10 +24,10 @@ import (
"time" "time"
"github.com/alecthomas/units" "github.com/alecthomas/units"
"github.com/go-kit/log"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -1501,7 +1501,7 @@ var expectedConf = &Config{
} }
func TestYAMLRoundtrip(t *testing.T) { func TestYAMLRoundtrip(t *testing.T) {
want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger()) want, err := LoadFile("testdata/roundtrip.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -1514,7 +1514,7 @@ func TestYAMLRoundtrip(t *testing.T) {
} }
func TestRemoteWriteRetryOnRateLimit(t *testing.T) { func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger()) want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -1529,7 +1529,7 @@ func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
func TestOTLPSanitizeResourceAttributes(t *testing.T) { func TestOTLPSanitizeResourceAttributes(t *testing.T) {
t.Run("good config", func(t *testing.T) { t.Run("good config", func(t *testing.T) {
want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, log.NewNopLogger()) want, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.good.yml"), false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -1541,7 +1541,7 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
}) })
t.Run("bad config", func(t *testing.T) { t.Run("bad config", func(t *testing.T) {
_, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, log.NewNopLogger()) _, err := LoadFile(filepath.Join("testdata", "otlp_sanitize_resource_attributes.bad.yml"), false, false, promslog.NewNopLogger())
require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`) require.ErrorContains(t, err, `duplicated promoted OTel resource attribute "k8s.job.name"`)
require.ErrorContains(t, err, `empty promoted OTel resource attribute`) require.ErrorContains(t, err, `empty promoted OTel resource attribute`)
}) })
@ -1550,16 +1550,16 @@ func TestOTLPSanitizeResourceAttributes(t *testing.T) {
func TestLoadConfig(t *testing.T) { func TestLoadConfig(t *testing.T) {
// Parse a valid file that sets a global scrape timeout. This tests whether parsing // Parse a valid file that sets a global scrape timeout. This tests whether parsing
// an overwritten default field in the global config permanently changes the default. // an overwritten default field in the global config permanently changes the default.
_, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger()) _, err := LoadFile("testdata/global_timeout.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, expectedConf, c) require.Equal(t, expectedConf, c)
} }
func TestScrapeIntervalLarger(t *testing.T) { func TestScrapeIntervalLarger(t *testing.T) {
c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger()) c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, c.ScrapeConfigs, 1) require.Len(t, c.ScrapeConfigs, 1)
for _, sc := range c.ScrapeConfigs { for _, sc := range c.ScrapeConfigs {
@ -1569,7 +1569,7 @@ func TestScrapeIntervalLarger(t *testing.T) {
// YAML marshaling must not reveal authentication credentials. // YAML marshaling must not reveal authentication credentials.
func TestElideSecrets(t *testing.T) { func TestElideSecrets(t *testing.T) {
c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger()) c, err := LoadFile("testdata/conf.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`) secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@ -1586,31 +1586,31 @@ func TestElideSecrets(t *testing.T) {
func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) { func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
// Parse a valid file that sets a rule files with an absolute path // Parse a valid file that sets a rule files with an absolute path
c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger()) c, err := LoadFile(ruleFilesConfigFile, false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, ruleFilesExpectedConf, c) require.Equal(t, ruleFilesExpectedConf, c)
} }
func TestKubernetesEmptyAPIServer(t *testing.T) { func TestKubernetesEmptyAPIServer(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger()) _, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
} }
func TestKubernetesWithKubeConfig(t *testing.T) { func TestKubernetesWithKubeConfig(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger()) _, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
} }
func TestKubernetesSelectors(t *testing.T) { func TestKubernetesSelectors(t *testing.T) {
_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger()) _, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger()) _, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
} }
@ -2094,7 +2094,7 @@ func TestBadConfigs(t *testing.T) {
model.NameValidationScheme = model.UTF8Validation model.NameValidationScheme = model.UTF8Validation
}() }()
for _, ee := range expectedErrors { for _, ee := range expectedErrors {
_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger()) _, err := LoadFile("testdata/"+ee.filename, false, false, promslog.NewNopLogger())
require.ErrorContains(t, err, ee.errMsg, require.ErrorContains(t, err, ee.errMsg,
"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) "Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
} }
@ -2125,7 +2125,7 @@ func TestBadStaticConfigsYML(t *testing.T) {
} }
func TestEmptyConfig(t *testing.T) { func TestEmptyConfig(t *testing.T) {
c, err := Load("", false, log.NewNopLogger()) c, err := Load("", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
exp := DefaultConfig exp := DefaultConfig
require.Equal(t, exp, *c) require.Equal(t, exp, *c)
@ -2135,38 +2135,38 @@ func TestExpandExternalLabels(t *testing.T) {
// Clean up any TEST env variable that could exist on the system. // Clean up any TEST env variable that could exist on the system.
os.Setenv("TEST", "") os.Setenv("TEST", "")
c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger()) c, err := LoadFile("testdata/external_labels.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foo${TEST}bar", "foo", "${TEST}", "qux", "foo$${TEST}", "xyz", "foo$$bar"), c.GlobalConfig.ExternalLabels)
c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "foobar", "foo", "", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
os.Setenv("TEST", "TestValue") os.Setenv("TEST", "TestValue")
c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger()) c, err = LoadFile("testdata/external_labels.good.yml", false, true, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels) testutil.RequireEqual(t, labels.FromStrings("bar", "foo", "baz", "fooTestValuebar", "foo", "TestValue", "qux", "foo${TEST}", "xyz", "foo$bar"), c.GlobalConfig.ExternalLabels)
} }
func TestAgentMode(t *testing.T) { func TestAgentMode(t *testing.T) {
_, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, log.NewNopLogger()) _, err := LoadFile("testdata/agent_mode.with_alert_manager.yml", true, false, promslog.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode") require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, log.NewNopLogger()) _, err = LoadFile("testdata/agent_mode.with_alert_relabels.yml", true, false, promslog.NewNopLogger())
require.ErrorContains(t, err, "field alerting is not allowed in agent mode") require.ErrorContains(t, err, "field alerting is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, log.NewNopLogger()) _, err = LoadFile("testdata/agent_mode.with_rule_files.yml", true, false, promslog.NewNopLogger())
require.ErrorContains(t, err, "field rule_files is not allowed in agent mode") require.ErrorContains(t, err, "field rule_files is not allowed in agent mode")
_, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, log.NewNopLogger()) _, err = LoadFile("testdata/agent_mode.with_remote_reads.yml", true, false, promslog.NewNopLogger())
require.ErrorContains(t, err, "field remote_read is not allowed in agent mode") require.ErrorContains(t, err, "field remote_read is not allowed in agent mode")
c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, log.NewNopLogger()) c, err := LoadFile("testdata/agent_mode.without_remote_writes.yml", true, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, c.RemoteWriteConfigs) require.Empty(t, c.RemoteWriteConfigs)
c, err = LoadFile("testdata/agent_mode.good.yml", true, false, log.NewNopLogger()) c, err = LoadFile("testdata/agent_mode.good.yml", true, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
require.Len(t, c.RemoteWriteConfigs, 1) require.Len(t, c.RemoteWriteConfigs, 1)
require.Equal( require.Equal(
@ -2177,7 +2177,7 @@ func TestAgentMode(t *testing.T) {
} }
func TestEmptyGlobalBlock(t *testing.T) { func TestEmptyGlobalBlock(t *testing.T) {
c, err := Load("global:\n", false, log.NewNopLogger()) c, err := Load("global:\n", false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
exp := DefaultConfig exp := DefaultConfig
exp.Runtime = DefaultRuntimeConfig exp.Runtime = DefaultRuntimeConfig
@ -2332,7 +2332,7 @@ func TestGetScrapeConfigs(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
c, err := LoadFile(tc.configFile, false, false, log.NewNopLogger()) c, err := LoadFile(tc.configFile, false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
scfgs, err := c.GetScrapeConfigs() scfgs, err := c.GetScrapeConfigs()
@ -2350,7 +2350,7 @@ func kubernetesSDHostURL() config.URL {
} }
func TestScrapeConfigDisableCompression(t *testing.T) { func TestScrapeConfigDisableCompression(t *testing.T) {
want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, log.NewNopLogger()) want, err := LoadFile("testdata/scrape_config_disable_compression.good.yml", false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)
@ -2397,7 +2397,7 @@ func TestScrapeConfigNameValidationSettings(t *testing.T) {
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, log.NewNopLogger()) want, err := LoadFile(fmt.Sprintf("testdata/%s.yml", tc.inputFile), false, false, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
out, err := yaml.Marshal(want) out, err := yaml.Marshal(want)


@ -233,7 +233,7 @@ type Config interface {
} }
type DiscovererOptions struct { type DiscovererOptions struct {
Logger log.Logger Logger *slog.Logger
// A registerer for the Discoverer's metrics. // A registerer for the Discoverer's metrics.
Registerer prometheus.Registerer Registerer prometheus.Registerer


@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
@ -29,11 +30,10 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/refresh"
@ -146,7 +146,7 @@ func (c *EC2SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
// the Discoverer interface. // the Discoverer interface.
type EC2Discovery struct { type EC2Discovery struct {
*refresh.Discovery *refresh.Discovery
logger log.Logger logger *slog.Logger
cfg *EC2SDConfig cfg *EC2SDConfig
ec2 *ec2.EC2 ec2 *ec2.EC2
@ -157,14 +157,14 @@ type EC2Discovery struct {
} }
// NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets. // NewEC2Discovery returns a new EC2Discovery which periodically refreshes its targets.
func NewEC2Discovery(conf *EC2SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) { func NewEC2Discovery(conf *EC2SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*EC2Discovery, error) {
m, ok := metrics.(*ec2Metrics) m, ok := metrics.(*ec2Metrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
d := &EC2Discovery{ d := &EC2Discovery{
logger: logger, logger: logger,
@ -254,8 +254,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
// Prometheus requires a reload if AWS adds a new AZ to the region. // Prometheus requires a reload if AWS adds a new AZ to the region.
if d.azToAZID == nil { if d.azToAZID == nil {
if err := d.refreshAZIDs(ctx); err != nil { if err := d.refreshAZIDs(ctx); err != nil {
level.Debug(d.logger).Log( d.logger.Debug(
"msg", "Unable to describe availability zones", "Unable to describe availability zones",
"err", err) "err", err)
} }
} }
@ -296,8 +296,8 @@ func (d *EC2Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error
labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone) labels[ec2LabelAZ] = model.LabelValue(*inst.Placement.AvailabilityZone)
azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone] azID, ok := d.azToAZID[*inst.Placement.AvailabilityZone]
if !ok && d.azToAZID != nil { if !ok && d.azToAZID != nil {
level.Debug(d.logger).Log( d.logger.Debug(
"msg", "Availability zone ID not found", "Availability zone ID not found",
"az", *inst.Placement.AvailabilityZone) "az", *inst.Placement.AvailabilityZone)
} }
labels[ec2LabelAZID] = model.LabelValue(azID) labels[ec2LabelAZID] = model.LabelValue(azID)


@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
@ -29,10 +30,10 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/lightsail" "github.com/aws/aws-sdk-go/service/lightsail"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/refresh"
@ -130,14 +131,14 @@ type LightsailDiscovery struct {
} }
// NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets. // NewLightsailDiscovery returns a new LightsailDiscovery which periodically refreshes its targets.
func NewLightsailDiscovery(conf *LightsailSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) { func NewLightsailDiscovery(conf *LightsailSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*LightsailDiscovery, error) {
m, ok := metrics.(*lightsailMetrics) m, ok := metrics.(*lightsailMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
d := &LightsailDiscovery{ d := &LightsailDiscovery{


@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"math/rand" "math/rand"
"net" "net"
"net/http" "net/http"
@ -35,10 +36,9 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
cache "github.com/Code-Hex/go-generics-cache" cache "github.com/Code-Hex/go-generics-cache"
"github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/Code-Hex/go-generics-cache/policy/lru"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
@ -175,7 +175,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
type Discovery struct { type Discovery struct {
*refresh.Discovery *refresh.Discovery
logger log.Logger logger *slog.Logger
cfg *SDConfig cfg *SDConfig
port int port int
cache *cache.Cache[string, *armnetwork.Interface] cache *cache.Cache[string, *armnetwork.Interface]
@ -183,14 +183,14 @@ type Discovery struct {
} }
// NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets. // NewDiscovery returns a new AzureDiscovery which periodically refreshes its targets.
func NewDiscovery(cfg *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(cfg *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*azureMetrics) m, ok := metrics.(*azureMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000))) l := cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5000)))
d := &Discovery{ d := &Discovery{
@ -228,13 +228,13 @@ type azureClient struct {
vm *armcompute.VirtualMachinesClient vm *armcompute.VirtualMachinesClient
vmss *armcompute.VirtualMachineScaleSetsClient vmss *armcompute.VirtualMachineScaleSetsClient
vmssvm *armcompute.VirtualMachineScaleSetVMsClient vmssvm *armcompute.VirtualMachineScaleSetVMsClient
logger log.Logger logger *slog.Logger
} }
var _ client = &azureClient{} var _ client = &azureClient{}
// createAzureClient is a helper function for creating an Azure compute client to ARM. // createAzureClient is a helper function for creating an Azure compute client to ARM.
func createAzureClient(cfg SDConfig, logger log.Logger) (client, error) { func createAzureClient(cfg SDConfig, logger *slog.Logger) (client, error) {
cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment) cloudConfiguration, err := CloudConfigurationFromName(cfg.Environment)
if err != nil { if err != nil {
return &azureClient{}, err return &azureClient{}, err
@ -337,21 +337,21 @@ type virtualMachine struct {
} }
// Create a new azureResource object from an ID string. // Create a new azureResource object from an ID string.
func newAzureResourceFromID(id string, logger log.Logger) (*arm.ResourceID, error) { func newAzureResourceFromID(id string, logger *slog.Logger) (*arm.ResourceID, error) {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
resourceID, err := arm.ParseResourceID(id) resourceID, err := arm.ParseResourceID(id)
if err != nil { if err != nil {
err := fmt.Errorf("invalid ID '%s': %w", id, err) err := fmt.Errorf("invalid ID '%s': %w", id, err)
level.Error(logger).Log("err", err) logger.Error("Failed to parse resource ID", "err", err)
return &arm.ResourceID{}, err return &arm.ResourceID{}, err
} }
return resourceID, nil return resourceID, nil
} }
func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) { func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
defer level.Debug(d.logger).Log("msg", "Azure discovery completed") defer d.logger.Debug("Azure discovery completed")
client, err := createAzureClient(*d.cfg, d.logger) client, err := createAzureClient(*d.cfg, d.logger)
if err != nil { if err != nil {
@ -365,7 +365,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
return nil, fmt.Errorf("could not get virtual machines: %w", err) return nil, fmt.Errorf("could not get virtual machines: %w", err)
} }
level.Debug(d.logger).Log("msg", "Found virtual machines during Azure discovery.", "count", len(machines)) d.logger.Debug("Found virtual machines during Azure discovery.", "count", len(machines))
// Load the vms managed by scale sets. // Load the vms managed by scale sets.
scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup) scaleSets, err := client.getScaleSets(ctx, d.cfg.ResourceGroup)
@ -459,7 +459,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
} }
if err != nil { if err != nil {
if errors.Is(err, errorNotFound) { if errors.Is(err, errorNotFound) {
level.Warn(d.logger).Log("msg", "Network interface does not exist", "name", nicID, "err", err) d.logger.Warn("Network interface does not exist", "name", nicID, "err", err)
} else { } else {
return nil, err return nil, err
} }
@ -480,7 +480,7 @@ func (d *Discovery) vmToLabelSet(ctx context.Context, client client, vm virtualM
// yet support this. On deallocated machines, this value happens to be nil so it // yet support this. On deallocated machines, this value happens to be nil so it
// is a cheap and easy way to determine if a machine is allocated or not. // is a cheap and easy way to determine if a machine is allocated or not.
if networkInterface.Properties.Primary == nil { if networkInterface.Properties.Primary == nil {
level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) d.logger.Debug("Skipping deallocated virtual machine", "machine", vm.Name)
return nil, nil return nil, nil
} }
@ -724,7 +724,7 @@ func (d *Discovery) addToCache(nicID string, netInt *armnetwork.Interface) {
rs := time.Duration(random) * time.Second rs := time.Duration(random) * time.Second
exptime := time.Duration(d.cfg.RefreshInterval*10) + rs exptime := time.Duration(d.cfg.RefreshInterval*10) + rs
d.cache.Set(nicID, netInt, cache.WithExpiration(exptime)) d.cache.Set(nicID, netInt, cache.WithExpiration(exptime))
level.Debug(d.logger).Log("msg", "Adding nic", "nic", nicID, "time", exptime.Seconds()) d.logger.Debug("Adding nic", "nic", nicID, "time", exptime.Seconds())
} }
// getFromCache will get the network Interface for the specified nicID // getFromCache will get the network Interface for the specified nicID

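go-kit accepted records with no message at all, for example level.Error(logger).Log("err", err); slog takes the message as its first argument, so those sites gain an explicit one, as newAzureResourceFromID does above. A short sketch of that shape, with an illustrative function name:

package example

import (
    "errors"
    "log/slog"

    "github.com/prometheus/common/promslog"
)

func parseResourceID(id string, logger *slog.Logger) error {
    if logger == nil {
        logger = promslog.NewNopLogger()
    }
    if id == "" { // stand-in for arm.ParseResourceID rejecting the input
        err := errors.New("invalid resource ID")
        // go-kit: level.Error(logger).Log("err", err)
        logger.Error("Failed to parse resource ID", "err", err)
        return err
    }
    return nil
}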

@ -23,7 +23,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4"
cache "github.com/Code-Hex/go-generics-cache" cache "github.com/Code-Hex/go-generics-cache"
"github.com/Code-Hex/go-generics-cache/policy/lru" "github.com/Code-Hex/go-generics-cache/policy/lru"
"github.com/go-kit/log" "github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak" "go.uber.org/goleak"
) )
@ -150,7 +150,7 @@ func TestVMToLabelSet(t *testing.T) {
cfg := DefaultSDConfig cfg := DefaultSDConfig
d := &Discovery{ d := &Discovery{
cfg: &cfg, cfg: &cfg,
logger: log.NewNopLogger(), logger: promslog.NewNopLogger(),
cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))), cache: cache.New(cache.AsLRU[string, *armnetwork.Interface](lru.WithCapacity(5))),
} }
network := armnetwork.Interface{ network := armnetwork.Interface{


@ -17,17 +17,17 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
consul "github.com/hashicorp/consul/api" consul "github.com/hashicorp/consul/api"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
@ -177,19 +177,19 @@ type Discovery struct {
allowStale bool allowStale bool
refreshInterval time.Duration refreshInterval time.Duration
finalizer func() finalizer func()
logger log.Logger logger *slog.Logger
metrics *consulMetrics metrics *consulMetrics
} }
// NewDiscovery returns a new Discovery for the given config. // NewDiscovery returns a new Discovery for the given config.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*consulMetrics) m, ok := metrics.(*consulMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout)) wrapper, err := config.NewClientFromConfig(conf.HTTPClientConfig, "consul_sd", config.WithIdleConnTimeout(2*watchTimeout))
@ -282,7 +282,7 @@ func (d *Discovery) getDatacenter() error {
info, err := d.client.Agent().Self() info, err := d.client.Agent().Self()
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) d.logger.Error("Error retrieving datacenter name", "err", err)
d.metrics.rpcFailuresCount.Inc() d.metrics.rpcFailuresCount.Inc()
return err return err
} }
@ -290,12 +290,12 @@ func (d *Discovery) getDatacenter() error {
dc, ok := info["Config"]["Datacenter"].(string) dc, ok := info["Config"]["Datacenter"].(string)
if !ok { if !ok {
err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"]) err := fmt.Errorf("invalid value '%v' for Config.Datacenter", info["Config"]["Datacenter"])
level.Error(d.logger).Log("msg", "Error retrieving datacenter name", "err", err) d.logger.Error("Error retrieving datacenter name", "err", err)
return err return err
} }
d.clientDatacenter = dc d.clientDatacenter = dc
d.logger = log.With(d.logger, "datacenter", dc) d.logger = d.logger.With("datacenter", dc)
return nil return nil
} }
@ -361,7 +361,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
// entire list of services. // entire list of services.
func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) { func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.Group, lastIndex *uint64, services map[string]func()) {
catalog := d.client.Catalog() catalog := d.client.Catalog()
level.Debug(d.logger).Log("msg", "Watching services", "tags", strings.Join(d.watchedTags, ",")) d.logger.Debug("Watching services", "tags", strings.Join(d.watchedTags, ","))
opts := &consul.QueryOptions{ opts := &consul.QueryOptions{
WaitIndex: *lastIndex, WaitIndex: *lastIndex,
@ -382,7 +382,7 @@ func (d *Discovery) watchServices(ctx context.Context, ch chan<- []*targetgroup.
} }
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error refreshing service list", "err", err) d.logger.Error("Error refreshing service list", "err", err)
d.metrics.rpcFailuresCount.Inc() d.metrics.rpcFailuresCount.Inc()
time.Sleep(retryInterval) time.Sleep(retryInterval)
return return
@ -445,7 +445,7 @@ type consulService struct {
discovery *Discovery discovery *Discovery
client *consul.Client client *consul.Client
tagSeparator string tagSeparator string
logger log.Logger logger *slog.Logger
rpcFailuresCount prometheus.Counter rpcFailuresCount prometheus.Counter
serviceRPCDuration prometheus.Observer serviceRPCDuration prometheus.Observer
} }
@ -490,7 +490,7 @@ func (d *Discovery) watchService(ctx context.Context, ch chan<- []*targetgroup.G
// Get updates for a service. // Get updates for a service.
func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) { func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Group, health *consul.Health, lastIndex *uint64) {
level.Debug(srv.logger).Log("msg", "Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ",")) srv.logger.Debug("Watching service", "service", srv.name, "tags", strings.Join(srv.tags, ","))
opts := &consul.QueryOptions{ opts := &consul.QueryOptions{
WaitIndex: *lastIndex, WaitIndex: *lastIndex,
@ -513,7 +513,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr
} }
if err != nil { if err != nil {
level.Error(srv.logger).Log("msg", "Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err) srv.logger.Error("Error refreshing service", "service", srv.name, "tags", strings.Join(srv.tags, ","), "err", err)
srv.rpcFailuresCount.Inc() srv.rpcFailuresCount.Inc()
time.Sleep(retryInterval) time.Sleep(retryInterval)
return return

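Attaching key/value context moves from go-kit's package-level helper to a method on the logger itself: log.With(d.logger, "datacenter", dc) becomes d.logger.With("datacenter", dc). A small sketch:

package example

import (
    "log/slog"

    "github.com/prometheus/common/promslog"
)

func loggerForDatacenter(logger *slog.Logger, dc string) *slog.Logger {
    if logger == nil {
        logger = promslog.NewNopLogger()
    }
    // go-kit: log.With(logger, "datacenter", dc)
    return logger.With("datacenter", dc)
}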

@ -21,10 +21,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak" "go.uber.org/goleak"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -270,7 +270,7 @@ func newServer(t *testing.T) (*httptest.Server, *SDConfig) {
} }
func newDiscovery(t *testing.T, config *SDConfig) *Discovery { func newDiscovery(t *testing.T, config *SDConfig) *Discovery {
logger := log.NewNopLogger() logger := promslog.NewNopLogger()
metrics := NewTestMetrics(t, config, prometheus.NewRegistry()) metrics := NewTestMetrics(t, config, prometheus.NewRegistry())


@ -16,6 +16,7 @@ package digitalocean
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
@ -23,7 +24,6 @@ import (
"time" "time"
"github.com/digitalocean/godo" "github.com/digitalocean/godo"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -111,7 +111,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*digitaloceanMetrics) m, ok := metrics.(*digitaloceanMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")


@ -19,9 +19,9 @@ import (
"net/url" "net/url"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -57,7 +57,7 @@ func TestDigitalOceanSDRefresh(t *testing.T) {
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
endpoint, err := url.Parse(sdmock.Mock.Endpoint()) endpoint, err := url.Parse(sdmock.Mock.Endpoint())
require.NoError(t, err) require.NoError(t, err)


@ -15,9 +15,9 @@ package discovery
import ( import (
"context" "context"
"log/slog"
"reflect" "reflect"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -47,7 +47,7 @@ type DiscovererMetrics interface {
// DiscovererOptions provides options for a Discoverer. // DiscovererOptions provides options for a Discoverer.
type DiscovererOptions struct { type DiscovererOptions struct {
Logger log.Logger Logger *slog.Logger
Metrics DiscovererMetrics Metrics DiscovererMetrics


@ -17,17 +17,17 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh" "github.com/prometheus/prometheus/discovery/refresh"
@ -111,21 +111,21 @@ type Discovery struct {
names []string names []string
port int port int
qtype uint16 qtype uint16
logger log.Logger logger *slog.Logger
metrics *dnsMetrics metrics *dnsMetrics
lookupFn func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dnsMetrics) m, ok := metrics.(*dnsMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
qtype := dns.TypeSRV qtype := dns.TypeSRV
@ -174,7 +174,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
for _, name := range d.names { for _, name := range d.names {
go func(n string) { go func(n string) {
if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) { if err := d.refreshOne(ctx, n, ch); err != nil && !errors.Is(err, context.Canceled) {
level.Error(d.logger).Log("msg", "Error refreshing DNS targets", "err", err) d.logger.Error("Error refreshing DNS targets", "err", err)
} }
wg.Done() wg.Done()
}(name) }(name)
@ -238,7 +238,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
// CNAME responses can occur with "Type: A" dns_sd_config requests. // CNAME responses can occur with "Type: A" dns_sd_config requests.
continue continue
default: default:
level.Warn(d.logger).Log("msg", "Invalid record", "record", record) d.logger.Warn("Invalid record", "record", record)
continue continue
} }
tg.Targets = append(tg.Targets, model.LabelSet{ tg.Targets = append(tg.Targets, model.LabelSet{
@ -288,7 +288,7 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ
// error will be generic-looking, because trying to return all the errors // error will be generic-looking, because trying to return all the errors
// returned by the combination of all name permutations and servers is a // returned by the combination of all name permutations and servers is a
// nightmare. // nightmare.
func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { func lookupWithSearchPath(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
conf, err := dns.ClientConfigFromFile(resolvConf) conf, err := dns.ClientConfigFromFile(resolvConf)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not load resolv.conf: %w", err) return nil, fmt.Errorf("could not load resolv.conf: %w", err)
@ -337,14 +337,14 @@ func lookupWithSearchPath(name string, qtype uint16, logger log.Logger) (*dns.Ms
// A non-viable answer is "anything else", which encompasses both various // A non-viable answer is "anything else", which encompasses both various
// system-level problems (like network timeouts) and also // system-level problems (like network timeouts) and also
// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). // valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc).
func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger log.Logger) (*dns.Msg, error) { func lookupFromAnyServer(name string, qtype uint16, conf *dns.ClientConfig, logger *slog.Logger) (*dns.Msg, error) {
client := &dns.Client{} client := &dns.Client{}
for _, server := range conf.Servers { for _, server := range conf.Servers {
servAddr := net.JoinHostPort(server, conf.Port) servAddr := net.JoinHostPort(server, conf.Port)
msg, err := askServerForName(name, qtype, client, servAddr, true) msg, err := askServerForName(name, qtype, client, servAddr, true)
if err != nil { if err != nil {
level.Warn(logger).Log("msg", "DNS resolution failed", "server", server, "name", name, "err", err) logger.Warn("DNS resolution failed", "server", server, "name", name, "err", err)
continue continue
} }

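The DNS discoverer also threads *slog.Logger through a plain function value, which keeps the lookup path easy to stub, as the test cases below do. A sketch of that shape with illustrative names:

package example

import (
    "log/slog"
    "net"

    "github.com/miekg/dns"
)

type resolver struct {
    logger   *slog.Logger
    lookupFn func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
}

// A test can inject a canned response without touching the network,
// mirroring the stubs in the test cases that follow:
func stubLookup(_ string, _ uint16, _ *slog.Logger) (*dns.Msg, error) {
    return &dns.Msg{Answer: []dns.RR{&dns.A{A: net.IPv4(192, 0, 2, 1)}}}, nil
}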

@ -16,11 +16,11 @@ package dns
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -40,7 +40,7 @@ func TestDNS(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
config SDConfig config SDConfig
lookup func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) lookup func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error)
expected []*targetgroup.Group expected []*targetgroup.Group
}{ }{
@ -52,7 +52,7 @@ func TestDNS(t *testing.T) {
Port: 80, Port: 80,
Type: "A", Type: "A",
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return nil, fmt.Errorf("some error") return nil, fmt.Errorf("some error")
}, },
expected: []*targetgroup.Group{}, expected: []*targetgroup.Group{},
@ -65,7 +65,7 @@ func TestDNS(t *testing.T) {
Port: 80, Port: 80,
Type: "A", Type: "A",
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{ return &dns.Msg{
Answer: []dns.RR{ Answer: []dns.RR{
&dns.A{A: net.IPv4(192, 0, 2, 2)}, &dns.A{A: net.IPv4(192, 0, 2, 2)},
@ -97,7 +97,7 @@ func TestDNS(t *testing.T) {
Port: 80, Port: 80,
Type: "AAAA", Type: "AAAA",
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{ return &dns.Msg{
Answer: []dns.RR{ Answer: []dns.RR{
&dns.AAAA{AAAA: net.IPv6loopback}, &dns.AAAA{AAAA: net.IPv6loopback},
@ -128,7 +128,7 @@ func TestDNS(t *testing.T) {
Type: "SRV", Type: "SRV",
RefreshInterval: model.Duration(time.Minute), RefreshInterval: model.Duration(time.Minute),
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{ return &dns.Msg{
Answer: []dns.RR{ Answer: []dns.RR{
&dns.SRV{Port: 3306, Target: "db1.example.com."}, &dns.SRV{Port: 3306, Target: "db1.example.com."},
@ -167,7 +167,7 @@ func TestDNS(t *testing.T) {
Names: []string{"_mysql._tcp.db.example.com."}, Names: []string{"_mysql._tcp.db.example.com."},
RefreshInterval: model.Duration(time.Minute), RefreshInterval: model.Duration(time.Minute),
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{ return &dns.Msg{
Answer: []dns.RR{ Answer: []dns.RR{
&dns.SRV{Port: 3306, Target: "db1.example.com."}, &dns.SRV{Port: 3306, Target: "db1.example.com."},
@ -198,7 +198,7 @@ func TestDNS(t *testing.T) {
Names: []string{"_mysql._tcp.db.example.com."}, Names: []string{"_mysql._tcp.db.example.com."},
RefreshInterval: model.Duration(time.Minute), RefreshInterval: model.Duration(time.Minute),
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{}, nil return &dns.Msg{}, nil
}, },
expected: []*targetgroup.Group{ expected: []*targetgroup.Group{
@ -215,7 +215,7 @@ func TestDNS(t *testing.T) {
Port: 25, Port: 25,
RefreshInterval: model.Duration(time.Minute), RefreshInterval: model.Duration(time.Minute),
}, },
lookup: func(name string, qtype uint16, logger log.Logger) (*dns.Msg, error) { lookup: func(name string, qtype uint16, logger *slog.Logger) (*dns.Msg, error) {
return &dns.Msg{ return &dns.Msg{
Answer: []dns.RR{ Answer: []dns.RR{
&dns.MX{Preference: 0, Mx: "smtp1.example.com."}, &dns.MX{Preference: 0, Mx: "smtp1.example.com."},


@ -17,13 +17,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -126,7 +126,7 @@ type Discovery struct {
} }
// NewDiscovery creates a new Eureka discovery for the given role. // NewDiscovery creates a new Eureka discovery for the given role.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*eurekaMetrics) m, ok := metrics.(*eurekaMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")


@ -19,6 +19,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -26,12 +27,11 @@ import (
"time" "time"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -175,20 +175,20 @@ type Discovery struct {
// and how many target groups they contained. // and how many target groups they contained.
// This is used to detect deleted target groups. // This is used to detect deleted target groups.
lastRefresh map[string]int lastRefresh map[string]int
logger log.Logger logger *slog.Logger
metrics *fileMetrics metrics *fileMetrics
} }
// NewDiscovery returns a new file discovery for the given paths. // NewDiscovery returns a new file discovery for the given paths.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
fm, ok := metrics.(*fileMetrics) fm, ok := metrics.(*fileMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
disc := &Discovery{ disc := &Discovery{
@ -210,7 +210,7 @@ func (d *Discovery) listFiles() []string {
for _, p := range d.paths { for _, p := range d.paths {
files, err := filepath.Glob(p) files, err := filepath.Glob(p)
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error expanding glob", "glob", p, "err", err) d.logger.Error("Error expanding glob", "glob", p, "err", err)
continue continue
} }
paths = append(paths, files...) paths = append(paths, files...)
@ -231,7 +231,7 @@ func (d *Discovery) watchFiles() {
p = "./" p = "./"
} }
if err := d.watcher.Add(p); err != nil { if err := d.watcher.Add(p); err != nil {
level.Error(d.logger).Log("msg", "Error adding file watch", "path", p, "err", err) d.logger.Error("Error adding file watch", "path", p, "err", err)
} }
} }
} }
@ -240,7 +240,7 @@ func (d *Discovery) watchFiles() {
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
watcher, err := fsnotify.NewWatcher() watcher, err := fsnotify.NewWatcher()
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error adding file watcher", "err", err) d.logger.Error("Error adding file watcher", "err", err)
d.metrics.fileWatcherErrorsCount.Inc() d.metrics.fileWatcherErrorsCount.Inc()
return return
} }
@ -280,7 +280,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
case err := <-d.watcher.Errors: case err := <-d.watcher.Errors:
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error watching file", "err", err) d.logger.Error("Error watching file", "err", err)
} }
} }
} }
@ -300,7 +300,7 @@ func (d *Discovery) deleteTimestamp(filename string) {
// stop shuts down the file watcher. // stop shuts down the file watcher.
func (d *Discovery) stop() { func (d *Discovery) stop() {
level.Debug(d.logger).Log("msg", "Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths)) d.logger.Debug("Stopping file discovery...", "paths", fmt.Sprintf("%v", d.paths))
done := make(chan struct{}) done := make(chan struct{})
defer close(done) defer close(done)
@ -320,10 +320,10 @@ func (d *Discovery) stop() {
} }
}() }()
if err := d.watcher.Close(); err != nil { if err := d.watcher.Close(); err != nil {
level.Error(d.logger).Log("msg", "Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err) d.logger.Error("Error closing file watcher", "paths", fmt.Sprintf("%v", d.paths), "err", err)
} }
level.Debug(d.logger).Log("msg", "File discovery stopped") d.logger.Debug("File discovery stopped")
} }
// refresh reads all files matching the discovery's patterns and sends the respective // refresh reads all files matching the discovery's patterns and sends the respective
@ -339,7 +339,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
if err != nil { if err != nil {
d.metrics.fileSDReadErrorsCount.Inc() d.metrics.fileSDReadErrorsCount.Inc()
level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err) d.logger.Error("Error reading file", "path", p, "err", err)
// Prevent deletion down below. // Prevent deletion down below.
ref[p] = d.lastRefresh[p] ref[p] = d.lastRefresh[p]
continue continue
@ -356,7 +356,7 @@ func (d *Discovery) refresh(ctx context.Context, ch chan<- []*targetgroup.Group)
for f, n := range d.lastRefresh { for f, n := range d.lastRefresh {
m, ok := ref[f] m, ok := ref[f]
if !ok || n > m { if !ok || n > m {
level.Debug(d.logger).Log("msg", "file_sd refresh found file that should be removed", "file", f) d.logger.Debug("file_sd refresh found file that should be removed", "file", f)
d.deleteTimestamp(f) d.deleteTimestamp(f)
for i := m; i < n; i++ { for i := m; i < n; i++ {
select { select {
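The recurring shape of the conversion in the file_sd changes above can be summarized in one small sketch: guard a possibly-nil logger with promslog.NewNopLogger(), keep the *slog.Logger on the struct, and call its level methods directly with alternating key/value attributes instead of going through level.Error(...).Log("msg", ...). The type and helper names below are illustrative stand-ins, not the real discovery/file code:

package main

import (
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
)

// exampleDiscovery is a stand-in for the SD types in this diff; only the
// logger handling is meant to mirror the converted code.
type exampleDiscovery struct {
	logger *slog.Logger
	paths  []string
}

// newExampleDiscovery guards against a nil logger the same way the converted
// constructors do, so later method calls always have a usable handler.
func newExampleDiscovery(paths []string, logger *slog.Logger) *exampleDiscovery {
	if logger == nil {
		// Discards every record, replacing go-kit's log.NewNopLogger().
		logger = promslog.NewNopLogger()
	}
	return &exampleDiscovery{logger: logger, paths: paths}
}

func (d *exampleDiscovery) refreshOnce() {
	for _, p := range d.paths {
		if _, err := os.Stat(p); err != nil {
			// Was: level.Error(d.logger).Log("msg", "Error reading file", "path", p, "err", err)
			d.logger.Error("Error reading file", "path", p, "err", err)
			continue
		}
		d.logger.Debug("Refreshed file", "path", p)
	}
}

func main() {
	d := newExampleDiscovery([]string{"targets.json"}, nil) // nil exercises the guard
	d.refreshOnce()
}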


@ -17,12 +17,12 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
@ -129,7 +129,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*gceMetrics) m, ok := metrics.(*gceMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")


@ -15,12 +15,12 @@ package hetzner
import ( import (
"context" "context"
"log/slog"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"time" "time"
"github.com/go-kit/log"
"github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/hetznercloud/hcloud-go/v2/hcloud"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -58,7 +58,7 @@ type hcloudDiscovery struct {
} }
// newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets. // newHcloudDiscovery returns a new hcloudDiscovery which periodically refreshes its targets.
func newHcloudDiscovery(conf *SDConfig, _ log.Logger) (*hcloudDiscovery, error) { func newHcloudDiscovery(conf *SDConfig, _ *slog.Logger) (*hcloudDiscovery, error) {
d := &hcloudDiscovery{ d := &hcloudDiscovery{
port: conf.Port, port: conf.Port,
} }


@ -18,8 +18,8 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -43,7 +43,7 @@ func TestHCloudSDRefresh(t *testing.T) {
cfg.HTTPClientConfig.BearerToken = hcloudTestToken cfg.HTTPClientConfig.BearerToken = hcloudTestToken
cfg.hcloudEndpoint = suite.Mock.Endpoint() cfg.hcloudEndpoint = suite.Mock.Endpoint()
d, err := newHcloudDiscovery(&cfg, log.NewNopLogger()) d, err := newHcloudDiscovery(&cfg, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
targetGroups, err := d.refresh(context.Background()) targetGroups, err := d.refresh(context.Background())
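The test-side counterpart of the same swap appears in the hcloud and robot tests above: wherever a test previously handed the discoverer go-kit's log.NewNopLogger(), it now hands promslog.NewNopLogger(), which returns a *slog.Logger that silently drops records. A minimal sketch; the test name and values are made up for illustration and do not correspond to a real discovery test:

package main

import (
	"testing"

	"github.com/prometheus/common/promslog"
	"github.com/stretchr/testify/require"
)

// TestNopSlogLogger only demonstrates the replacement logger used by the
// converted tests.
func TestNopSlogLogger(t *testing.T) {
	logger := promslog.NewNopLogger()
	require.NotNil(t, logger)

	// Safe to call at any level; the nop handler discards the records.
	logger.Error("Error expanding glob", "glob", "*.json", "err", "example")
	logger.Debug("File discovery stopped")
}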


@ -17,9 +17,9 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"time" "time"
"github.com/go-kit/log"
"github.com/hetznercloud/hcloud-go/v2/hcloud" "github.com/hetznercloud/hcloud-go/v2/hcloud"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -135,7 +135,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*hetznerMetrics) m, ok := metrics.(*hetznerMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
@ -157,7 +157,7 @@ func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.Discovere
), nil ), nil
} }
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
switch conf.Role { switch conf.Role {
case HetznerRoleHcloud: case HetznerRoleHcloud:
if conf.hcloudEndpoint == "" { if conf.hcloudEndpoint == "" {


@ -18,13 +18,13 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
@ -51,7 +51,7 @@ type robotDiscovery struct {
} }
// newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets. // newRobotDiscovery returns a new robotDiscovery which periodically refreshes its targets.
func newRobotDiscovery(conf *SDConfig, _ log.Logger) (*robotDiscovery, error) { func newRobotDiscovery(conf *SDConfig, _ *slog.Logger) (*robotDiscovery, error) {
d := &robotDiscovery{ d := &robotDiscovery{
port: conf.Port, port: conf.Port,
endpoint: conf.robotEndpoint, endpoint: conf.robotEndpoint,


@ -18,9 +18,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -42,7 +42,7 @@ func TestRobotSDRefresh(t *testing.T) {
cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword} cfg.HTTPClientConfig.BasicAuth = &config.BasicAuth{Username: robotTestUsername, Password: robotTestPassword}
cfg.robotEndpoint = suite.Mock.Endpoint() cfg.robotEndpoint = suite.Mock.Endpoint()
d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
targetGroups, err := d.refresh(context.Background()) targetGroups, err := d.refresh(context.Background())
@ -91,7 +91,7 @@ func TestRobotSDRefreshHandleError(t *testing.T) {
cfg := DefaultSDConfig cfg := DefaultSDConfig
cfg.robotEndpoint = suite.Mock.Endpoint() cfg.robotEndpoint = suite.Mock.Endpoint()
d, err := newRobotDiscovery(&cfg, log.NewNopLogger()) d, err := newRobotDiscovery(&cfg, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
targetGroups, err := d.refresh(context.Background()) targetGroups, err := d.refresh(context.Background())


@ -19,17 +19,18 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -114,14 +115,14 @@ type Discovery struct {
} }
// NewDiscovery returns a new HTTP discovery for the given config. // NewDiscovery returns a new HTTP discovery for the given config.
func NewDiscovery(conf *SDConfig, logger log.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, clientOpts []config.HTTPClientOption, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*httpMetrics) m, ok := metrics.(*httpMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...) client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http", clientOpts...)


@ -21,11 +21,11 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -49,7 +49,7 @@ func TestHTTPValidRefresh(t *testing.T) {
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
defer metrics.Unregister() defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -94,7 +94,7 @@ func TestHTTPInvalidCode(t *testing.T) {
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
defer metrics.Unregister() defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -123,7 +123,7 @@ func TestHTTPInvalidFormat(t *testing.T) {
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
defer metrics.Unregister() defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -442,7 +442,7 @@ func TestSourceDisappeared(t *testing.T) {
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
defer metrics.Unregister() defer metrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), nil, metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), nil, metrics)
require.NoError(t, err) require.NoError(t, err)
for _, test := range cases { for _, test := range cases {
ctx := context.Background() ctx := context.Background()


@ -16,9 +16,9 @@ package ionos
import ( import (
"errors" "errors"
"fmt" "fmt"
"log/slog"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -43,7 +43,7 @@ func init() {
type Discovery struct{} type Discovery struct{}
// NewDiscovery returns a new refresh.Discovery for IONOS Cloud. // NewDiscovery returns a new refresh.Discovery for IONOS Cloud.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ionosMetrics) m, ok := metrics.(*ionosMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")


@ -16,13 +16,13 @@ package ionos
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
ionoscloud "github.com/ionos-cloud/sdk-go/v6" ionoscloud "github.com/ionos-cloud/sdk-go/v6"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -60,7 +60,7 @@ type serverDiscovery struct {
datacenterID string datacenterID string
} }
func newServerDiscovery(conf *SDConfig, _ log.Logger) (*serverDiscovery, error) { func newServerDiscovery(conf *SDConfig, _ *slog.Logger) (*serverDiscovery, error) {
d := &serverDiscovery{ d := &serverDiscovery{
port: conf.Port, port: conf.Port,
datacenterID: conf.DatacenterID, datacenterID: conf.DatacenterID,


@ -17,13 +17,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
@ -33,7 +33,7 @@ import (
// Endpoints discovers new endpoint targets. // Endpoints discovers new endpoint targets.
type Endpoints struct { type Endpoints struct {
logger log.Logger logger *slog.Logger
endpointsInf cache.SharedIndexInformer endpointsInf cache.SharedIndexInformer
serviceInf cache.SharedInformer serviceInf cache.SharedInformer
@ -49,9 +49,9 @@ type Endpoints struct {
} }
// NewEndpoints returns a new endpoints discovery. // NewEndpoints returns a new endpoints discovery.
func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints {
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd)
@ -92,13 +92,13 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err) l.Error("Error adding endpoints event handler.", "err", err)
} }
serviceUpdate := func(o interface{}) { serviceUpdate := func(o interface{}) {
svc, err := convertToService(o) svc, err := convertToService(o)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) e.logger.Error("converting to Service object failed", "err", err)
return return
} }
@ -111,7 +111,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
} }
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) e.logger.Error("retrieving endpoints failed", "err", err)
} }
} }
_, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -131,7 +131,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding services event handler.", "err", err) l.Error("Error adding services event handler.", "err", err)
} }
_, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
@ -154,7 +154,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) l.Error("Error adding pods event handler.", "err", err)
} }
if e.withNodeMetadata { if e.withNodeMetadata {
_, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -172,7 +172,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) l.Error("Error adding nodes event handler.", "err", err)
} }
} }
@ -182,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca
func (e *Endpoints) enqueueNode(nodeName string) { func (e *Endpoints) enqueueNode(nodeName string) {
endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName) endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
return return
} }
@ -194,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) {
func (e *Endpoints) enqueuePod(podNamespacedName string) { func (e *Endpoints) enqueuePod(podNamespacedName string) {
endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err) e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err)
return return
} }
@ -223,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache") e.logger.Error("endpoints informer unable to sync cache")
} }
return return
} }
@ -247,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "splitting key failed", "key", key) e.logger.Error("splitting key failed", "key", key)
return true return true
} }
o, exists, err := e.endpointsStore.GetByKey(key) o, exists, err := e.endpointsStore.GetByKey(key)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) e.logger.Error("getting object from store failed", "key", key)
return true return true
} }
if !exists { if !exists {
@ -262,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group)
} }
eps, err := convertToEndpoints(o) eps, err := convertToEndpoints(o)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err) e.logger.Error("converting to Endpoints object failed", "err", err)
return true return true
} }
send(ctx, ch, e.buildEndpoints(eps)) send(ctx, ch, e.buildEndpoints(eps))
@ -400,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group {
v := eps.Labels[apiv1.EndpointsOverCapacity] v := eps.Labels[apiv1.EndpointsOverCapacity]
if v == "truncated" { if v == "truncated" {
level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
} }
if v == "warning" { if v == "warning" {
level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name)
} }
// For all seen pods, check all container ports. If they were not covered // For all seen pods, check all container ports. If they were not covered
@ -460,7 +460,7 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
obj, exists, err := e.podStore.Get(p) obj, exists, err := e.podStore.Get(p)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) e.logger.Error("resolving pod ref failed", "err", err)
return nil return nil
} }
if !exists { if !exists {
@ -476,7 +476,7 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
obj, exists, err := e.serviceStore.Get(svc) obj, exists, err := e.serviceStore.Get(svc)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) e.logger.Error("retrieving service failed", "err", err)
return return
} }
if !exists { if !exists {
@ -487,14 +487,14 @@ func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) {
tg.Labels = tg.Labels.Merge(serviceLabels(svc)) tg.Labels = tg.Labels.Merge(serviceLabels(svc))
} }
func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet { func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet {
if nodeName == nil { if nodeName == nil {
return tg return tg
} }
obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName) obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err) logger.Error("Error getting node", "node", *nodeName, "err", err)
return tg return tg
} }


@ -17,13 +17,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1" v1 "k8s.io/api/discovery/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
@ -35,7 +35,7 @@ import (
// EndpointSlice discovers new endpoint targets. // EndpointSlice discovers new endpoint targets.
type EndpointSlice struct { type EndpointSlice struct {
logger log.Logger logger *slog.Logger
endpointSliceInf cache.SharedIndexInformer endpointSliceInf cache.SharedIndexInformer
serviceInf cache.SharedInformer serviceInf cache.SharedInformer
@ -51,9 +51,9 @@ type EndpointSlice struct {
} }
// NewEndpointSlice returns a new endpointslice discovery. // NewEndpointSlice returns a new endpointslice discovery.
func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice {
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd)
@ -92,13 +92,13 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err) l.Error("Error adding endpoint slices event handler.", "err", err)
} }
serviceUpdate := func(o interface{}) { serviceUpdate := func(o interface{}) {
svc, err := convertToService(o) svc, err := convertToService(o)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) e.logger.Error("converting to Service object failed", "err", err)
return return
} }
@ -108,7 +108,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
for _, obj := range e.endpointSliceStore.List() { for _, obj := range e.endpointSliceStore.List() {
esa, err := e.getEndpointSliceAdaptor(obj) esa, err := e.getEndpointSliceAdaptor(obj)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) e.logger.Error("converting to EndpointSlice object failed", "err", err)
continue continue
} }
if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name { if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name {
@ -131,7 +131,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding services event handler.", "err", err) l.Error("Error adding services event handler.", "err", err)
} }
if e.withNodeMetadata { if e.withNodeMetadata {
@ -150,7 +150,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) l.Error("Error adding nodes event handler.", "err", err)
} }
} }
@ -160,7 +160,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod
func (e *EndpointSlice) enqueueNode(nodeName string) { func (e *EndpointSlice) enqueueNode(nodeName string) {
endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName) endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err)
return return
} }
@ -188,7 +188,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group)
} }
if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") e.logger.Error("endpointslice informer unable to sync cache")
} }
return return
} }
@ -212,13 +212,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "splitting key failed", "key", key) e.logger.Error("splitting key failed", "key", key)
return true return true
} }
o, exists, err := e.endpointSliceStore.GetByKey(key) o, exists, err := e.endpointSliceStore.GetByKey(key)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) e.logger.Error("getting object from store failed", "key", key)
return true return true
} }
if !exists { if !exists {
@ -228,7 +228,7 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr
esa, err := e.getEndpointSliceAdaptor(o) esa, err := e.getEndpointSliceAdaptor(o)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) e.logger.Error("converting to EndpointSlice object failed", "err", err)
return true return true
} }
@ -470,7 +470,7 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod {
obj, exists, err := e.podStore.Get(p) obj, exists, err := e.podStore.Get(p)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) e.logger.Error("resolving pod ref failed", "err", err)
return nil return nil
} }
if !exists { if !exists {
@ -495,7 +495,7 @@ func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgro
obj, exists, err := e.serviceStore.Get(svc) obj, exists, err := e.serviceStore.Get(svc)
if err != nil { if err != nil {
level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) e.logger.Error("retrieving service failed", "err", err)
return return
} }
if !exists { if !exists {


@ -17,10 +17,9 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
v1 "k8s.io/api/networking/v1" v1 "k8s.io/api/networking/v1"
@ -32,14 +31,14 @@ import (
// Ingress implements discovery of Kubernetes ingress. // Ingress implements discovery of Kubernetes ingress.
type Ingress struct { type Ingress struct {
logger log.Logger logger *slog.Logger
informer cache.SharedInformer informer cache.SharedInformer
store cache.Store store cache.Store
queue *workqueue.Type queue *workqueue.Type
} }
// NewIngress returns a new ingress discovery. // NewIngress returns a new ingress discovery.
func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress {
ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd)
ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate)
ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete)
@ -66,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err) l.Error("Error adding ingresses event handler.", "err", err)
} }
return s return s
} }
@ -86,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) { if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(i.logger).Log("msg", "ingress informer unable to sync cache") i.logger.Error("ingress informer unable to sync cache")
} }
return return
} }
@ -127,7 +126,7 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
case *v1.Ingress: case *v1.Ingress:
ia = newIngressAdaptorFromV1(ingress) ia = newIngressAdaptorFromV1(ingress)
default: default:
level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", i.logger.Error("converting to Ingress object failed", "err",
fmt.Errorf("received unexpected object: %v", o)) fmt.Errorf("received unexpected object: %v", o))
return true return true
} }


@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"os" "os"
"reflect" "reflect"
"strings" "strings"
@ -25,11 +26,10 @@ import (
"github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/util/strutil"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
disv1 "k8s.io/api/discovery/v1" disv1 "k8s.io/api/discovery/v1"
@ -260,7 +260,7 @@ type Discovery struct {
sync.RWMutex sync.RWMutex
client kubernetes.Interface client kubernetes.Interface
role Role role Role
logger log.Logger logger *slog.Logger
namespaceDiscovery *NamespaceDiscovery namespaceDiscovery *NamespaceDiscovery
discoverers []discovery.Discoverer discoverers []discovery.Discoverer
selectors roleSelector selectors roleSelector
@ -285,14 +285,14 @@ func (d *Discovery) getNamespaces() []string {
} }
// New creates a new Kubernetes discovery for the given role. // New creates a new Kubernetes discovery for the given role.
func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) {
m, ok := metrics.(*kubernetesMetrics) m, ok := metrics.(*kubernetesMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
var ( var (
kcfg *rest.Config kcfg *rest.Config
@ -324,7 +324,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di
ownNamespace = string(ownNamespaceContents) ownNamespace = string(ownNamespaceContents)
} }
level.Info(l).Log("msg", "Using pod service account via in-cluster config") l.Info("Using pod service account via in-cluster config")
default: default:
rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
if err != nil { if err != nil {
@ -446,7 +446,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
go nodeInf.Run(ctx.Done()) go nodeInf.Run(ctx.Done())
} }
eps := NewEndpointSlice( eps := NewEndpointSlice(
log.With(d.logger, "role", "endpointslice"), d.logger.With("role", "endpointslice"),
informer, informer,
d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
@ -506,7 +506,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
eps := NewEndpoints( eps := NewEndpoints(
log.With(d.logger, "role", "endpoint"), d.logger.With("role", "endpoint"),
d.newEndpointsByNodeInformer(elw), d.newEndpointsByNodeInformer(elw),
d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled),
@ -540,7 +540,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}, },
} }
pod := NewPod( pod := NewPod(
log.With(d.logger, "role", "pod"), d.logger.With("role", "pod"),
d.newPodsByNodeInformer(plw), d.newPodsByNodeInformer(plw),
nodeInformer, nodeInformer,
d.metrics.eventCount, d.metrics.eventCount,
@ -564,7 +564,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
}, },
} }
svc := NewService( svc := NewService(
log.With(d.logger, "role", "service"), d.logger.With("role", "service"),
d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled),
d.metrics.eventCount, d.metrics.eventCount,
) )
@ -589,7 +589,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled)
ingress := NewIngress( ingress := NewIngress(
log.With(d.logger, "role", "ingress"), d.logger.With("role", "ingress"),
informer, informer,
d.metrics.eventCount, d.metrics.eventCount,
) )
@ -598,11 +598,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
case RoleNode: case RoleNode:
nodeInformer := d.newNodeInformer(ctx) nodeInformer := d.newNodeInformer(ctx)
node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount) node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount)
d.discoverers = append(d.discoverers, node) d.discoverers = append(d.discoverers, node)
go node.informer.Run(ctx.Done()) go node.informer.Run(ctx.Done())
default: default:
level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role) d.logger.Error("unknown Kubernetes discovery kind", "role", d.role)
} }
var wg sync.WaitGroup var wg sync.WaitGroup
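The kubernetes changes above also convert the child-logger calls: go-kit's log.With(d.logger, "role", ...) becomes the With method on *slog.Logger itself, so each discoverer gets a logger that stamps its role onto every record. A stdlib-only sketch of that pattern; the role values come from the diff above, but the surrounding setup is illustrative rather than the real wiring:

package main

import (
	"log/slog"
	"os"
)

func main() {
	base := slog.New(slog.NewTextHandler(os.Stderr, nil))

	for _, role := range []string{"endpointslice", "pod", "service", "node"} {
		// Equivalent of the old log.With(base, "role", role): the returned
		// child logger attaches role=<role> to every record it emits.
		roleLogger := base.With("role", role)
		roleLogger.Info("starting discoverer")
	}
}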


@ -20,8 +20,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@ -71,7 +71,7 @@ func makeDiscoveryWithVersion(role Role, nsDiscovery NamespaceDiscovery, k8sVer
d := &Discovery{ d := &Discovery{
client: clientset, client: clientset,
logger: log.NewNopLogger(), logger: promslog.NewNopLogger(),
role: role, role: role,
namespaceDiscovery: &nsDiscovery, namespaceDiscovery: &nsDiscovery,
ownNamespace: "own-ns", ownNamespace: "own-ns",


@ -17,13 +17,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
@ -38,16 +38,16 @@ const (
// Node discovers Kubernetes nodes. // Node discovers Kubernetes nodes.
type Node struct { type Node struct {
logger log.Logger logger *slog.Logger
informer cache.SharedInformer informer cache.SharedInformer
store cache.Store store cache.Store
queue *workqueue.Type queue *workqueue.Type
} }
// NewNode returns a new node discovery. // NewNode returns a new node discovery.
func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node {
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd)
@ -76,7 +76,7 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) l.Error("Error adding nodes event handler.", "err", err)
} }
return n return n
} }
@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(n.logger).Log("msg", "node informer unable to sync cache") n.logger.Error("node informer unable to sync cache")
} }
return return
} }
@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
} }
node, err := convertToNode(o) node, err := convertToNode(o)
if err != nil { if err != nil {
level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err) n.logger.Error("converting to Node object failed", "err", err)
return true return true
} }
send(ctx, ch, n.buildNode(node)) send(ctx, ch, n.buildNode(node))
@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group {
addr, addrMap, err := nodeAddress(node) addr, addrMap, err := nodeAddress(node)
if err != nil { if err != nil {
level.Warn(n.logger).Log("msg", "No node address found", "err", err) n.logger.Warn("No node address found", "err", err)
return nil return nil
} }
addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10))


@ -17,14 +17,14 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
@ -44,14 +44,14 @@ type Pod struct {
nodeInf cache.SharedInformer nodeInf cache.SharedInformer
withNodeMetadata bool withNodeMetadata bool
store cache.Store store cache.Store
logger log.Logger logger *slog.Logger
queue *workqueue.Type queue *workqueue.Type
} }
// NewPod creates a new pod discovery. // NewPod creates a new pod discovery.
func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod {
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd)
@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) l.Error("Error adding pods event handler.", "err", err)
} }
if p.withNodeMetadata { if p.withNodeMetadata {
@ -100,7 +100,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) l.Error("Error adding pods event handler.", "err", err)
} }
} }
@ -127,7 +127,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(p.logger).Log("msg", "pod informer unable to sync cache") p.logger.Error("pod informer unable to sync cache")
} }
return return
} }
@ -164,7 +164,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool
} }
pod, err := convertToPod(o) pod, err := convertToPod(o)
if err != nil { if err != nil {
level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err) p.logger.Error("converting to Pod object failed", "err", err)
return true return true
} }
send(ctx, ch, p.buildPod(pod)) send(ctx, ch, p.buildPod(pod))
@ -246,7 +246,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe
func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string {
cStatus, err := p.findPodContainerStatus(statuses, containerName) cStatus, err := p.findPodContainerStatus(statuses, containerName)
if err != nil { if err != nil {
level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) p.logger.Debug("cannot find container ID", "err", err)
return "" return ""
} }
return cStatus.ContainerID return cStatus.ContainerID
@ -315,7 +315,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
func (p *Pod) enqueuePodsForNode(nodeName string) { func (p *Pod) enqueuePodsForNode(nodeName string) {
pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName) pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName)
if err != nil { if err != nil {
level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err) p.logger.Error("Error getting pods for node", "node", nodeName, "err", err)
return return
} }


@ -17,13 +17,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
apiv1 "k8s.io/api/core/v1" apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue" "k8s.io/client-go/util/workqueue"
@ -33,16 +33,16 @@ import (
// Service implements discovery of Kubernetes services. // Service implements discovery of Kubernetes services.
type Service struct { type Service struct {
logger log.Logger logger *slog.Logger
informer cache.SharedInformer informer cache.SharedInformer
store cache.Store store cache.Store
queue *workqueue.Type queue *workqueue.Type
} }
// NewService returns a new service discovery. // NewService returns a new service discovery.
func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service {
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd)
@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C
}, },
}) })
if err != nil { if err != nil {
level.Error(l).Log("msg", "Error adding services event handler.", "err", err) l.Error("Error adding services event handler.", "err", err)
} }
return s return s
} }
@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(s.logger).Log("msg", "service informer unable to sync cache") s.logger.Error("service informer unable to sync cache")
} }
return return
} }
@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b
} }
eps, err := convertToService(o) eps, err := convertToService(o)
if err != nil { if err != nil {
level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err) s.logger.Error("converting to Service object failed", "err", err)
return true return true
} }
send(ctx, ch, s.buildService(eps)) send(ctx, ch, s.buildService(eps))


@ -17,13 +17,13 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/linode/linodego" "github.com/linode/linodego"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -138,7 +138,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*linodeMetrics) m, ok := metrics.(*linodeMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")


@ -19,10 +19,10 @@ import (
"net/url" "net/url"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -238,7 +238,7 @@ func TestLinodeSDRefresh(t *testing.T) {
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
endpoint, err := url.Parse(sdmock.Endpoint()) endpoint, err := url.Parse(sdmock.Endpoint())
require.NoError(t, err) require.NoError(t, err)


@ -16,14 +16,14 @@ package discovery
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"reflect" "reflect"
"sync" "sync"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere
} }
// NewManager is the Discovery Manager constructor. // NewManager is the Discovery Manager constructor.
func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
mgr := &Manager{ mgr := &Manager{
logger: logger, logger: logger,
@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re
if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil {
mgr.metrics = metrics mgr.metrics = metrics
} else { } else {
level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err)
return nil return nil
} }
@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) {
// Manager maintains a set of discovery providers and sends each update to a map channel. // Manager maintains a set of discovery providers and sends each update to a map channel.
// Targets are grouped by the target set name. // Targets are grouped by the target set name.
type Manager struct { type Manager struct {
logger log.Logger logger *slog.Logger
name string name string
httpOpts []config.HTTPClientOption httpOpts []config.HTTPClientOption
mtx sync.RWMutex mtx sync.RWMutex
@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D
} }
func (m *Manager) startProvider(ctx context.Context, p *Provider) { func (m *Manager) startProvider(ctx context.Context, p *Provider) {
level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs))
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
updates := make(chan []*targetgroup.Group) updates := make(chan []*targetgroup.Group)
@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ
case tgs, ok := <-updates: case tgs, ok := <-updates:
m.metrics.ReceivedUpdates.Inc() m.metrics.ReceivedUpdates.Inc()
if !ok { if !ok {
level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) m.logger.Debug("Discoverer channel closed", "provider", p.name)
// Wait for provider cancellation to ensure targets are cleaned up when expected. // Wait for provider cancellation to ensure targets are cleaned up when expected.
<-ctx.Done() <-ctx.Done()
return return
@ -364,7 +364,7 @@ func (m *Manager) sender() {
case m.syncCh <- m.allGroups(): case m.syncCh <- m.allGroups():
default: default:
m.metrics.DelayedUpdates.Inc() m.metrics.DelayedUpdates.Inc()
level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle")
select { select {
case m.triggerSend <- struct{}{}: case m.triggerSend <- struct{}{}:
default: default:
@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int {
} }
typ := cfg.Name() typ := cfg.Name()
d, err := cfg.NewDiscoverer(DiscovererOptions{ d, err := cfg.NewDiscoverer(DiscovererOptions{
Logger: log.With(m.logger, "discovery", typ, "config", setName), Logger: m.logger.With("discovery", typ, "config", setName),
HTTPClientOptions: m.httpOpts, HTTPClientOptions: m.httpOpts,
Metrics: m.sdMetrics[typ], Metrics: m.sdMetrics[typ],
}) })
if err != nil { if err != nil {
level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName)
failed++ failed++
return return
} }
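One note on the manager conversion above: *slog.Logger accepts the same alternating key/value arguments the old Log(...) calls used, and it also takes typed slog.Attr values, which avoid odd-length argument lists. A stdlib-only sketch of both spellings of the same record, with placeholder values rather than real manager output:

package main

import (
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Alternating key/value form, as used throughout this diff.
	logger.Error("Cannot create service discovery",
		"err", "example failure", "type", "http", "config", "default")

	// Equivalent typed-attribute form supported by log/slog.
	logger.Error("Cannot create service discovery",
		slog.String("err", "example failure"),
		slog.String("type", "http"),
		slog.String("config", "default"))
}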


@ -22,10 +22,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
client_testutil "github.com/prometheus/client_golang/prometheus/testutil" client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
@ -675,7 +675,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
@ -791,7 +791,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -828,7 +828,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -868,7 +868,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -911,7 +911,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -979,7 +979,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1023,7 +1023,7 @@ func TestDiscovererConfigs(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1060,7 +1060,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1141,7 +1141,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1202,7 +1202,7 @@ func TestGaugeFailedConfigs(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1454,7 +1454,7 @@ func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
_, sdMetrics := NewTestMetrics(t, reg) _, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
discoveryManager.updatert = 100 * time.Millisecond discoveryManager.updatert = 100 * time.Millisecond
go discoveryManager.Run() go discoveryManager.Run()
@ -1551,7 +1551,7 @@ func TestUnregisterMetrics(t *testing.T) {
refreshMetrics, sdMetrics := NewTestMetrics(t, reg) refreshMetrics, sdMetrics := NewTestMetrics(t, reg)
discoveryManager := NewManager(ctx, log.NewNopLogger(), reg, sdMetrics) discoveryManager := NewManager(ctx, promslog.NewNopLogger(), reg, sdMetrics)
// discoveryManager will be nil if there was an error configuring metrics. // discoveryManager will be nil if there was an error configuring metrics.
require.NotNil(t, discoveryManager) require.NotNil(t, discoveryManager)
// Unregister all metrics. // Unregister all metrics.
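
On the test side the swap is mechanical: every place that built a no-op go-kit logger now builds a no-op *slog.Logger via prometheus/common's promslog package. A hypothetical stand-alone test showing the helper in use:

package discovery_test

import (
	"testing"

	"github.com/prometheus/common/promslog"
)

// Hypothetical test, not part of the commit; it only shows that
// promslog.NewNopLogger returns a usable *slog.Logger that discards output.
func TestNopSlogLogger(t *testing.T) {
	logger := promslog.NewNopLogger()
	if logger == nil {
		t.Fatal("expected a non-nil *slog.Logger")
	}
	logger.Info("discarded by the nop handler")
}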

@ -19,6 +19,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"math/rand" "math/rand"
"net" "net"
"net/http" "net/http"
@ -27,7 +28,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -140,7 +140,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Marathon Discovery. // NewDiscovery returns a new Marathon Discovery.
func NewDiscovery(conf SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*marathonMetrics) m, ok := metrics.(*marathonMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -16,6 +16,7 @@ package moby
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
@ -28,7 +29,6 @@ import (
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/network"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -128,7 +128,7 @@ type DockerDiscovery struct {
} }
// NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets. // NewDockerDiscovery returns a new DockerDiscovery which periodically refreshes its targets.
func NewDockerDiscovery(conf *DockerSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) { func NewDockerDiscovery(conf *DockerSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*DockerDiscovery, error) {
m, ok := metrics.(*dockerMetrics) m, ok := metrics.(*dockerMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -19,9 +19,9 @@ import (
"sort" "sort"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -48,7 +48,7 @@ host: %s
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -226,7 +226,7 @@ host: %s
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDockerDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDockerDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()

@ -16,13 +16,13 @@ package moby
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net/http" "net/http"
"net/url" "net/url"
"time" "time"
"github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -125,7 +125,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *DockerSwarmSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *DockerSwarmSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*dockerswarmMetrics) m, ok := metrics.(*dockerswarmMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -18,9 +18,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -48,7 +48,7 @@ host: %s
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()

@ -18,9 +18,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -48,7 +48,7 @@ host: %s
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -349,7 +349,7 @@ filters:
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()

@ -18,9 +18,9 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -48,7 +48,7 @@ host: %s
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()

@ -17,12 +17,12 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
nomad "github.com/hashicorp/nomad/api" nomad "github.com/hashicorp/nomad/api"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -121,7 +121,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*nomadMetrics) m, ok := metrics.(*nomadMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -21,9 +21,9 @@ import (
"net/url" "net/url"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -160,7 +160,7 @@ func TestNomadSDRefresh(t *testing.T) {
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
tgs, err := d.refresh(context.Background()) tgs, err := d.refresh(context.Background())

@ -16,10 +16,10 @@ package openstack
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors"
@ -43,14 +43,14 @@ type HypervisorDiscovery struct {
provider *gophercloud.ProviderClient provider *gophercloud.ProviderClient
authOpts *gophercloud.AuthOptions authOpts *gophercloud.AuthOptions
region string region string
logger log.Logger logger *slog.Logger
port int port int
availability gophercloud.Availability availability gophercloud.Availability
} }
// newHypervisorDiscovery returns a new hypervisor discovery. // newHypervisorDiscovery returns a new hypervisor discovery.
func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
port int, region string, availability gophercloud.Availability, l log.Logger, port int, region string, availability gophercloud.Availability, l *slog.Logger,
) *HypervisorDiscovery { ) *HypervisorDiscovery {
return &HypervisorDiscovery{ return &HypervisorDiscovery{
provider: provider, authOpts: opts, provider: provider, authOpts: opts,

@ -16,17 +16,17 @@ package openstack
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips"
"github.com/gophercloud/gophercloud/openstack/compute/v2/servers" "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
"github.com/gophercloud/gophercloud/pagination" "github.com/gophercloud/gophercloud/pagination"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil" "github.com/prometheus/prometheus/util/strutil"
@ -52,7 +52,7 @@ type InstanceDiscovery struct {
provider *gophercloud.ProviderClient provider *gophercloud.ProviderClient
authOpts *gophercloud.AuthOptions authOpts *gophercloud.AuthOptions
region string region string
logger log.Logger logger *slog.Logger
port int port int
allTenants bool allTenants bool
availability gophercloud.Availability availability gophercloud.Availability
@ -60,10 +60,10 @@ type InstanceDiscovery struct {
// NewInstanceDiscovery returns a new instance discovery. // NewInstanceDiscovery returns a new instance discovery.
func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions, func newInstanceDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
port int, region string, allTenants bool, availability gophercloud.Availability, l log.Logger, port int, region string, allTenants bool, availability gophercloud.Availability, l *slog.Logger,
) *InstanceDiscovery { ) *InstanceDiscovery {
if l == nil { if l == nil {
l = log.NewNopLogger() l = promslog.NewNopLogger()
} }
return &InstanceDiscovery{ return &InstanceDiscovery{
provider: provider, authOpts: opts, provider: provider, authOpts: opts,
@ -134,7 +134,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
for _, s := range instanceList { for _, s := range instanceList {
if len(s.Addresses) == 0 { if len(s.Addresses) == 0 {
level.Info(i.logger).Log("msg", "Got no IP address", "instance", s.ID) i.logger.Info("Got no IP address", "instance", s.ID)
continue continue
} }
@ -151,7 +151,7 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
if !nameOk { if !nameOk {
flavorID, idOk := s.Flavor["id"].(string) flavorID, idOk := s.Flavor["id"].(string)
if !idOk { if !idOk {
level.Warn(i.logger).Log("msg", "Invalid type for both flavor original_name and flavor id, expected string") i.logger.Warn("Invalid type for both flavor original_name and flavor id, expected string")
continue continue
} }
labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID) labels[openstackLabelInstanceFlavor] = model.LabelValue(flavorID)
@ -171,22 +171,22 @@ func (i *InstanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
for pool, address := range s.Addresses { for pool, address := range s.Addresses {
md, ok := address.([]interface{}) md, ok := address.([]interface{})
if !ok { if !ok {
level.Warn(i.logger).Log("msg", "Invalid type for address, expected array") i.logger.Warn("Invalid type for address, expected array")
continue continue
} }
if len(md) == 0 { if len(md) == 0 {
level.Debug(i.logger).Log("msg", "Got no IP address", "instance", s.ID) i.logger.Debug("Got no IP address", "instance", s.ID)
continue continue
} }
for _, address := range md { for _, address := range md {
md1, ok := address.(map[string]interface{}) md1, ok := address.(map[string]interface{})
if !ok { if !ok {
level.Warn(i.logger).Log("msg", "Invalid type for address, expected dict") i.logger.Warn("Invalid type for address, expected dict")
continue continue
} }
addr, ok := md1["addr"].(string) addr, ok := md1["addr"].(string)
if !ok { if !ok {
level.Warn(i.logger).Log("msg", "Invalid type for address, expected string") i.logger.Warn("Invalid type for address, expected string")
continue continue
} }
if _, ok := floatingIPPresent[addr]; ok { if _, ok := floatingIPPresent[addr]; ok {

@ -17,10 +17,10 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/http" "net/http"
"time" "time"
"github.com/go-kit/log"
"github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack" "github.com/gophercloud/gophercloud/openstack"
"github.com/mwitkow/go-conntrack" "github.com/mwitkow/go-conntrack"
@ -142,7 +142,7 @@ type refresher interface {
} }
// NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets. // NewDiscovery returns a new OpenStack Discoverer which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, l *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*openstackMetrics) m, ok := metrics.(*openstackMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
@ -163,7 +163,7 @@ func NewDiscovery(conf *SDConfig, l log.Logger, metrics discovery.DiscovererMetr
), nil ), nil
} }
func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) { func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
var opts gophercloud.AuthOptions var opts gophercloud.AuthOptions
if conf.IdentityEndpoint == "" { if conf.IdentityEndpoint == "" {
var err error var err error

@ -16,13 +16,12 @@ package ovhcloud
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net/netip" "net/netip"
"net/url" "net/url"
"path" "path"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/ovh/go-ovh/ovh" "github.com/ovh/go-ovh/ovh"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -55,10 +54,10 @@ type dedicatedServer struct {
type dedicatedServerDiscovery struct { type dedicatedServerDiscovery struct {
*refresh.Discovery *refresh.Discovery
config *SDConfig config *SDConfig
logger log.Logger logger *slog.Logger
} }
func newDedicatedServerDiscovery(conf *SDConfig, logger log.Logger) *dedicatedServerDiscovery { func newDedicatedServerDiscovery(conf *SDConfig, logger *slog.Logger) *dedicatedServerDiscovery {
return &dedicatedServerDiscovery{config: conf, logger: logger} return &dedicatedServerDiscovery{config: conf, logger: logger}
} }
@ -115,10 +114,7 @@ func (d *dedicatedServerDiscovery) refresh(context.Context) ([]*targetgroup.Grou
for _, dedicatedServerName := range dedicatedServerList { for _, dedicatedServerName := range dedicatedServerList {
dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName) dedicatedServer, err := getDedicatedServerDetails(client, dedicatedServerName)
if err != nil { if err != nil {
err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error()) d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), dedicatedServerName), "err", err.Error())
if err != nil {
return nil, err
}
continue continue
} }
dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer) dedicatedServerDetailedList = append(dedicatedServerDetailedList, *dedicatedServer)
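
Worth noting in this hunk (and the matching one in the vps refresher further down): go-kit's Logger.Log returned an error that the old code captured and propagated, whereas slog's Warn returns nothing, so that wrapper disappears. A hedged sketch of the resulting call shape; the helper name is hypothetical, the message format mirrors the hunk:

package ovhcloud

import (
	"fmt"
	"log/slog"
)

// warnDetailFetch is a hypothetical helper: with slog there is no error
// return from Warn, so the old `err := level.Warn(...).Log(...)` check
// around the log call is no longer needed.
func warnDetailFetch(logger *slog.Logger, source, name string, err error) {
	logger.Warn(fmt.Sprintf("%s: Could not get details of %s", source, name), "err", err.Error())
}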

@ -21,8 +21,8 @@ import (
"os" "os"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
@ -41,7 +41,7 @@ application_secret: %s
consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest) consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecretTest, ovhcloudConsumerKeyTest)
require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))
d, err := newRefresher(&cfg, log.NewNopLogger()) d, err := newRefresher(&cfg, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
targetGroups, err := d.refresh(ctx) targetGroups, err := d.refresh(ctx)

@ -17,10 +17,10 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/netip" "net/netip"
"time" "time"
"github.com/go-kit/log"
"github.com/ovh/go-ovh/ovh" "github.com/ovh/go-ovh/ovh"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -137,7 +137,7 @@ func parseIPList(ipList []string) ([]netip.Addr, error) {
return ipAddresses, nil return ipAddresses, nil
} }
func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) { func newRefresher(conf *SDConfig, logger *slog.Logger) (refresher, error) {
switch conf.Service { switch conf.Service {
case "vps": case "vps":
return newVpsDiscovery(conf, logger), nil return newVpsDiscovery(conf, logger), nil
@ -148,7 +148,7 @@ func newRefresher(conf *SDConfig, logger log.Logger) (refresher, error) {
} }
// NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets. // NewDiscovery returns a new OVHcloud Discoverer which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*ovhcloudMetrics) m, ok := metrics.(*ovhcloudMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -20,11 +20,11 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/util/testutil"
) )
var ( var (
@ -121,7 +121,7 @@ func TestParseIPs(t *testing.T) {
func TestDiscoverer(t *testing.T) { func TestDiscoverer(t *testing.T) {
conf, _ := getMockConf("vps") conf, _ := getMockConf("vps")
logger := testutil.NewLogger(t) logger := promslog.NewNopLogger()
reg := prometheus.NewRegistry() reg := prometheus.NewRegistry()
refreshMetrics := discovery.NewRefreshMetrics(reg) refreshMetrics := discovery.NewRefreshMetrics(reg)

@ -16,13 +16,12 @@ package ovhcloud
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net/netip" "net/netip"
"net/url" "net/url"
"path" "path"
"strconv" "strconv"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/ovh/go-ovh/ovh" "github.com/ovh/go-ovh/ovh"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -68,10 +67,10 @@ type virtualPrivateServer struct {
type vpsDiscovery struct { type vpsDiscovery struct {
*refresh.Discovery *refresh.Discovery
config *SDConfig config *SDConfig
logger log.Logger logger *slog.Logger
} }
func newVpsDiscovery(conf *SDConfig, logger log.Logger) *vpsDiscovery { func newVpsDiscovery(conf *SDConfig, logger *slog.Logger) *vpsDiscovery {
return &vpsDiscovery{config: conf, logger: logger} return &vpsDiscovery{config: conf, logger: logger}
} }
@ -133,10 +132,7 @@ func (d *vpsDiscovery) refresh(context.Context) ([]*targetgroup.Group, error) {
for _, vpsName := range vpsList { for _, vpsName := range vpsList {
vpsDetailed, err := getVpsDetails(client, vpsName) vpsDetailed, err := getVpsDetails(client, vpsName)
if err != nil { if err != nil {
err := level.Warn(d.logger).Log("msg", fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error()) d.logger.Warn(fmt.Sprintf("%s: Could not get details of %s", d.getSource(), vpsName), "err", err.Error())
if err != nil {
return nil, err
}
continue continue
} }
vpsDetailedList = append(vpsDetailedList, *vpsDetailed) vpsDetailedList = append(vpsDetailedList, *vpsDetailed)

@ -23,8 +23,8 @@ import (
yaml "gopkg.in/yaml.v2" yaml "gopkg.in/yaml.v2"
"github.com/go-kit/log"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -43,7 +43,7 @@ consumer_key: %s`, mock.URL, ovhcloudApplicationKeyTest, ovhcloudApplicationSecr
require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg)) require.NoError(t, yaml.UnmarshalStrict([]byte(cfgString), &cfg))
d, err := newRefresher(&cfg, log.NewNopLogger()) d, err := newRefresher(&cfg, promslog.NewNopLogger())
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
targetGroups, err := d.refresh(ctx) targetGroups, err := d.refresh(ctx)

@ -19,6 +19,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"
@ -27,11 +28,11 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/grafana/regexp" "github.com/grafana/regexp"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -138,14 +139,14 @@ type Discovery struct {
} }
// NewDiscovery returns a new PuppetDB discovery for the given config. // NewDiscovery returns a new PuppetDB discovery for the given config.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*puppetdbMetrics) m, ok := metrics.(*puppetdbMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http") client, err := config.NewClientFromConfig(conf.HTTPClientConfig, "http")

@ -22,10 +22,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -70,7 +70,7 @@ func TestPuppetSlashInURL(t *testing.T) {
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, apiURL, d.url) require.Equal(t, apiURL, d.url)
@ -94,7 +94,7 @@ func TestPuppetDBRefresh(t *testing.T) {
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -142,7 +142,7 @@ func TestPuppetDBRefreshWithParameters(t *testing.T) {
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -201,7 +201,7 @@ func TestPuppetDBInvalidCode(t *testing.T) {
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()
@ -229,7 +229,7 @@ func TestPuppetDBInvalidFormat(t *testing.T) {
metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics) metrics := cfg.NewDiscovererMetrics(reg, refreshMetrics)
require.NoError(t, metrics.Register()) require.NoError(t, metrics.Register())
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
ctx := context.Background() ctx := context.Background()

@ -16,17 +16,17 @@ package refresh
import ( import (
"context" "context"
"errors" "errors"
"log/slog"
"time" "time"
"github.com/go-kit/log" "github.com/prometheus/common/promslog"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
) )
type Options struct { type Options struct {
Logger log.Logger Logger *slog.Logger
Mech string Mech string
Interval time.Duration Interval time.Duration
RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) RefreshF func(ctx context.Context) ([]*targetgroup.Group, error)
@ -35,7 +35,7 @@ type Options struct {
// Discovery implements the Discoverer interface. // Discovery implements the Discoverer interface.
type Discovery struct { type Discovery struct {
logger log.Logger logger *slog.Logger
interval time.Duration interval time.Duration
refreshf func(ctx context.Context) ([]*targetgroup.Group, error) refreshf func(ctx context.Context) ([]*targetgroup.Group, error)
metrics *discovery.RefreshMetrics metrics *discovery.RefreshMetrics
@ -45,9 +45,9 @@ type Discovery struct {
func NewDiscovery(opts Options) *Discovery { func NewDiscovery(opts Options) *Discovery {
m := opts.MetricsInstantiator.Instantiate(opts.Mech) m := opts.MetricsInstantiator.Instantiate(opts.Mech)
var logger log.Logger var logger *slog.Logger
if opts.Logger == nil { if opts.Logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} else { } else {
logger = opts.Logger logger = opts.Logger
} }
@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
tgs, err := d.refresh(ctx) tgs, err := d.refresh(ctx)
if err != nil { if err != nil {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) d.logger.Error("Unable to refresh target groups", "err", err.Error())
} }
} else { } else {
select { select {
@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
tgs, err := d.refresh(ctx) tgs, err := d.refresh(ctx)
if err != nil { if err != nil {
if !errors.Is(ctx.Err(), context.Canceled) { if !errors.Is(ctx.Err(), context.Canceled) {
level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) d.logger.Error("Unable to refresh target groups", "err", err.Error())
} }
continue continue
} }
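
The nil fallback kept in NewDiscovery above is the guard used throughout the converted constructors: a nil *slog.Logger cannot safely be used for With or the level methods, so promslog.NewNopLogger() is substituted first. A minimal sketch of the pattern, with an illustrative attribute:

package refresh

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// guardLogger is a hypothetical helper demonstrating the guard: fall back
// to a no-op logger before chaining With, so a nil *slog.Logger is never
// dereferenced.
func guardLogger(l *slog.Logger) *slog.Logger {
	if l == nil {
		l = promslog.NewNopLogger()
	}
	return l.With("component", "refresh") // attribute is illustrative
}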

@ -17,12 +17,12 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/http" "net/http"
"os" "os"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -185,7 +185,7 @@ func init() {
// the Discoverer interface. // the Discoverer interface.
type Discovery struct{} type Discovery struct{}
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*refresh.Discovery, error) {
m, ok := metrics.(*scalewayMetrics) m, ok := metrics.(*scalewayMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -19,12 +19,12 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/http" "net/http"
"net/url" "net/url"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/mwitkow/go-conntrack" "github.com/mwitkow/go-conntrack"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -146,7 +146,7 @@ type Discovery struct {
} }
// New returns a new Discovery which periodically refreshes its targets. // New returns a new Discovery which periodically refreshes its targets.
func New(logger log.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) { func New(logger *slog.Logger, conf *SDConfig, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*tritonMetrics) m, ok := metrics.(*tritonMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -17,6 +17,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@ -24,7 +25,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/kolo/xmlrpc" "github.com/kolo/xmlrpc"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
@ -109,7 +109,7 @@ type Discovery struct {
entitlement string entitlement string
separator string separator string
interval time.Duration interval time.Duration
logger log.Logger logger *slog.Logger
} }
// NewDiscovererMetrics implements discovery.Config. // NewDiscovererMetrics implements discovery.Config.
@ -212,7 +212,7 @@ func getEndpointInfoForSystems(
} }
// NewDiscovery returns a uyuni discovery for the given configuration. // NewDiscovery returns a uyuni discovery for the given configuration.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*uyuniMetrics) m, ok := metrics.(*uyuniMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -16,13 +16,13 @@ package vultr
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net" "net"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -114,7 +114,7 @@ type Discovery struct {
} }
// NewDiscovery returns a new Discovery which periodically refreshes its targets. // NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) { func NewDiscovery(conf *SDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (*Discovery, error) {
m, ok := metrics.(*vultrMetrics) m, ok := metrics.(*vultrMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")

@ -19,9 +19,9 @@ import (
"net/url" "net/url"
"testing" "testing"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -57,7 +57,7 @@ func TestVultrSDRefresh(t *testing.T) {
defer metrics.Unregister() defer metrics.Unregister()
defer refreshMetrics.Unregister() defer refreshMetrics.Unregister()
d, err := NewDiscovery(&cfg, log.NewNopLogger(), metrics) d, err := NewDiscovery(&cfg, promslog.NewNopLogger(), metrics)
require.NoError(t, err) require.NoError(t, err)
endpoint, err := url.Parse(sdMock.Mock.Endpoint()) endpoint, err := url.Parse(sdMock.Mock.Endpoint())
require.NoError(t, err) require.NoError(t, err)

@ -15,14 +15,14 @@ package xds
import ( import (
"fmt" "fmt"
"log/slog"
"net/url" "net/url"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/anypb"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
@ -99,7 +99,7 @@ func (c *KumaSDConfig) SetDirectory(dir string) {
func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) { func (c *KumaSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
logger := opts.Logger logger := opts.Logger
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
return NewKumaHTTPDiscovery(c, logger, opts.Metrics) return NewKumaHTTPDiscovery(c, logger, opts.Metrics)
@ -158,7 +158,7 @@ func kumaMadsV1ResourceParser(resources []*anypb.Any, typeURL string) ([]model.L
return targets, nil return targets, nil
} }
func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) { func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger *slog.Logger, metrics discovery.DiscovererMetrics) (discovery.Discoverer, error) {
m, ok := metrics.(*xdsMetrics) m, ok := metrics.(*xdsMetrics)
if !ok { if !ok {
return nil, fmt.Errorf("invalid discovery metrics type") return nil, fmt.Errorf("invalid discovery metrics type")
@ -170,7 +170,7 @@ func NewKumaHTTPDiscovery(conf *KumaSDConfig, logger log.Logger, metrics discove
var err error var err error
clientID, err = osutil.GetFQDN() clientID, err = osutil.GetFQDN()
if err != nil { if err != nil {
level.Debug(logger).Log("msg", "error getting FQDN", "err", err) logger.Debug("error getting FQDN", "err", err)
clientID = "prometheus" clientID = "prometheus"
} }
} }

@ -23,13 +23,14 @@ package xds
import ( import (
context "context" context "context"
reflect "reflect"
sync "sync"
v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
_ "github.com/envoyproxy/protoc-gen-validate/validate" _ "github.com/envoyproxy/protoc-gen-validate/validate"
_ "google.golang.org/genproto/googleapis/api/annotations" _ "google.golang.org/genproto/googleapis/api/annotations"
protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl" protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
) )
const ( const (

@ -15,11 +15,10 @@ package xds
import ( import (
"context" "context"
"log/slog"
"time" "time"
v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/common/config" "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/encoding/protojson"
@ -104,7 +103,7 @@ type fetchDiscovery struct {
refreshInterval time.Duration refreshInterval time.Duration
parseResources resourceParser parseResources resourceParser
logger log.Logger logger *slog.Logger
metrics *xdsMetrics metrics *xdsMetrics
} }
@ -140,7 +139,7 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou
} }
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "error parsing resources", "err", err) d.logger.Error("error parsing resources", "err", err)
d.metrics.fetchFailuresCount.Inc() d.metrics.fetchFailuresCount.Inc()
return return
} }
@ -153,12 +152,12 @@ func (d *fetchDiscovery) poll(ctx context.Context, ch chan<- []*targetgroup.Grou
parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl) parsedTargets, err := d.parseResources(response.Resources, response.TypeUrl)
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "error parsing resources", "err", err) d.logger.Error("error parsing resources", "err", err)
d.metrics.fetchFailuresCount.Inc() d.metrics.fetchFailuresCount.Inc()
return return
} }
level.Debug(d.logger).Log("msg", "Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets)) d.logger.Debug("Updated to version", "version", response.VersionInfo, "targets", len(parsedTargets))
select { select {
case <-ctx.Done(): case <-ctx.Done():

@ -22,9 +22,9 @@ import (
"time" "time"
v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/goleak" "go.uber.org/goleak"
"google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/anypb"
@ -90,7 +90,7 @@ func constantResourceParser(targets []model.LabelSet, err error) resourceParser
} }
} }
var nopLogger = log.NewNopLogger() var nopLogger = promslog.NewNopLogger()
type testResourceClient struct { type testResourceClient struct {
resourceTypeURL string resourceTypeURL string

@ -18,15 +18,16 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/go-kit/log"
"github.com/go-zookeeper/zk" "github.com/go-zookeeper/zk"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
@ -146,16 +147,16 @@ type Discovery struct {
treeCaches []*treecache.ZookeeperTreeCache treeCaches []*treecache.ZookeeperTreeCache
parse func(data []byte, path string) (model.LabelSet, error) parse func(data []byte, path string) (model.LabelSet, error)
logger log.Logger logger *slog.Logger
} }
// NewNerveDiscovery returns a new Discovery for the given Nerve config. // NewNerveDiscovery returns a new Discovery for the given Nerve config.
func NewNerveDiscovery(conf *NerveSDConfig, logger log.Logger) (*Discovery, error) { func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) {
return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember) return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember)
} }
// NewServersetDiscovery returns a new Discovery for the given serverset config. // NewServersetDiscovery returns a new Discovery for the given serverset config.
func NewServersetDiscovery(conf *ServersetSDConfig, logger log.Logger) (*Discovery, error) { func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) {
return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember) return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember)
} }
@ -165,11 +166,11 @@ func NewDiscovery(
srvs []string, srvs []string,
timeout time.Duration, timeout time.Duration,
paths []string, paths []string,
logger log.Logger, logger *slog.Logger,
pf func(data []byte, path string) (model.LabelSet, error), pf func(data []byte, path string) (model.LabelSet, error),
) (*Discovery, error) { ) (*Discovery, error) {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
conn, _, err := zk.Connect( conn, _, err := zk.Connect(

@ -18,6 +18,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"net" "net"
"net/http" "net/http"
"os" "os"
@ -26,10 +27,9 @@ import (
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
prom_discovery "github.com/prometheus/prometheus/discovery" prom_discovery "github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/discovery/targetgroup"
@ -41,7 +41,7 @@ var (
a = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.") a = kingpin.New("sd adapter usage", "Tool to generate file_sd target files for unimplemented SD mechanisms.")
outputFile = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String() outputFile = a.Flag("output.file", "Output file for file_sd compatible file.").Default("custom_sd.json").String()
listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String() listenAddress = a.Flag("listen.address", "The address the Consul HTTP API is listening on for requests.").Default("localhost:8500").String()
logger log.Logger logger *slog.Logger
// addressLabel is the name for the label containing a target's address. // addressLabel is the name for the label containing a target's address.
addressLabel = model.MetaLabelPrefix + "consul_address" addressLabel = model.MetaLabelPrefix + "consul_address"
@ -90,7 +90,7 @@ type discovery struct {
address string address string
refreshInterval int refreshInterval int
tagSeparator string tagSeparator string
logger log.Logger logger *slog.Logger
oldSourceList map[string]bool oldSourceList map[string]bool
} }
@ -164,7 +164,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
var srvs map[string][]string var srvs map[string][]string
resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address)) resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error getting services list", "err", err) d.logger.Error("Error getting services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second) time.Sleep(time.Duration(d.refreshInterval) * time.Second)
continue continue
} }
@ -173,7 +173,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
io.Copy(io.Discard, resp.Body) io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error reading services list", "err", err) d.logger.Error("Error reading services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second) time.Sleep(time.Duration(d.refreshInterval) * time.Second)
continue continue
} }
@ -181,7 +181,7 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
err = json.Unmarshal(b, &srvs) err = json.Unmarshal(b, &srvs)
resp.Body.Close() resp.Body.Close()
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error parsing services list", "err", err) d.logger.Error("Error parsing services list", "err", err)
time.Sleep(time.Duration(d.refreshInterval) * time.Second) time.Sleep(time.Duration(d.refreshInterval) * time.Second)
continue continue
} }
@ -200,13 +200,13 @@ func (d *discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
} }
resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name)) resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/service/%s", d.address, name))
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error getting services nodes", "service", name, "err", err) d.logger.Error("Error getting services nodes", "service", name, "err", err)
break break
} }
tg, err := d.parseServiceNodes(resp, name) tg, err := d.parseServiceNodes(resp, name)
if err != nil { if err != nil {
level.Error(d.logger).Log("msg", "Error parsing services nodes", "service", name, "err", err) d.logger.Error("Error parsing services nodes", "service", name, "err", err)
break break
} }
tgs = append(tgs, tg) tgs = append(tgs, tg)
@ -254,8 +254,7 @@ func main() {
fmt.Println("err: ", err) fmt.Println("err: ", err)
return return
} }
logger = log.NewSyncLogger(log.NewLogfmtLogger(os.Stdout)) logger = promslog.New(&promslog.Config{})
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
ctx := context.Background() ctx := context.Background()
@ -272,7 +271,7 @@ func main() {
} }
if err != nil { if err != nil {
level.Error(logger).Log("msg", "failed to create discovery metrics", "err", err) logger.Error("failed to create discovery metrics", "err", err)
os.Exit(1) os.Exit(1)
} }
@ -280,7 +279,7 @@ func main() {
refreshMetrics := prom_discovery.NewRefreshMetrics(reg) refreshMetrics := prom_discovery.NewRefreshMetrics(reg)
metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics) metrics, err := prom_discovery.RegisterSDMetrics(reg, refreshMetrics)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "failed to register service discovery metrics", "err", err) logger.Error("failed to register service discovery metrics", "err", err)
os.Exit(1) os.Exit(1)
} }

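For reference, a minimal sketch (not part of this diff) of the call-site pattern the conversion above follows: promslog builds a *slog.Logger, and leveled go-kit calls become slog methods that take the message first, followed by key/value pairs.

package main

import (
	"errors"

	"github.com/prometheus/common/promslog"
)

func main() {
	// An empty Config uses the promslog defaults; this stands in for the
	// go-kit NewSyncLogger/NewLogfmtLogger construction removed above.
	logger := promslog.New(&promslog.Config{})

	// Before: level.Error(d.logger).Log("msg", "Error getting services list", "err", err)
	// After:  d.logger.Error("Error getting services list", "err", err)
	err := errors.New("connection refused")
	logger.Error("Error getting services list", "err", err)
}
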
View file

@ -18,13 +18,12 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"sort" "sort"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@ -55,7 +54,7 @@ type Adapter struct {
manager *discovery.Manager manager *discovery.Manager
output string output string
name string name string
logger log.Logger logger *slog.Logger
} }
func mapToArray(m map[string]*customSD) []customSD { func mapToArray(m map[string]*customSD) []customSD {
@ -106,7 +105,7 @@ func (a *Adapter) refreshTargetGroups(allTargetGroups map[string][]*targetgroup.
a.groups = tempGroups a.groups = tempGroups
err := a.writeOutput() err := a.writeOutput()
if err != nil { if err != nil {
level.Error(log.With(a.logger, "component", "sd-adapter")).Log("err", err) a.logger.With("component", "sd-adapter").Error("failed to write output", "err", err)
} }
} }
} }
@ -163,7 +162,7 @@ func (a *Adapter) Run() {
} }
// NewAdapter creates a new instance of Adapter. // NewAdapter creates a new instance of Adapter.
func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter { func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger *slog.Logger, sdMetrics map[string]discovery.DiscovererMetrics, registerer prometheus.Registerer) *Adapter {
return &Adapter{ return &Adapter{
ctx: ctx, ctx: ctx,
disc: d, disc: d,

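A stdlib-only sketch (illustrative error value) of the With chaining used in refreshTargetGroups above; With returns a new *slog.Logger that carries the attribute on every record it emits.

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	// Replaces log.With(a.logger, "component", "sd-adapter") followed by .Log("err", err),
	// now with an explicit message string as slog requires.
	adapterLogger := logger.With("component", "sd-adapter")
	adapterLogger.Error("failed to write output", "err", errors.New("permission denied"))
}
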
View file

@ -4,7 +4,6 @@ go 1.22.0
require ( require (
github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/kingpin/v2 v2.4.0
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4 github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.11.6 github.com/influxdata/influxdb v1.11.6
@ -26,6 +25,7 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect github.com/dennwc/varint v1.0.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-kit/log v0.2.1 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect

View file

@ -16,19 +16,19 @@ package graphite
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"log/slog"
"math" "math"
"net" "net"
"sort" "sort"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
) )
// Client allows sending batches of Prometheus samples to Graphite. // Client allows sending batches of Prometheus samples to Graphite.
type Client struct { type Client struct {
logger log.Logger logger *slog.Logger
address string address string
transport string transport string
@ -37,9 +37,9 @@ type Client struct {
} }
// NewClient creates a new Client. // NewClient creates a new Client.
func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client { func NewClient(logger *slog.Logger, address, transport string, timeout time.Duration, prefix string) *Client {
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
return &Client{ return &Client{
logger: logger, logger: logger,
@ -93,7 +93,7 @@ func (c *Client) Write(samples model.Samples) error {
t := float64(s.Timestamp.UnixNano()) / 1e9 t := float64(s.Timestamp.UnixNano()) / 1e9
v := float64(s.Value) v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) { if math.IsNaN(v) || math.IsInf(v, 0) {
level.Debug(c.logger).Log("msg", "Cannot send value to Graphite, skipping sample", "value", v, "sample", s) c.logger.Debug("Cannot send value to Graphite, skipping sample", "value", v, "sample", s)
continue continue
} }
fmt.Fprintf(&buf, "%s %f %f\n", k, v, t) fmt.Fprintf(&buf, "%s %f %f\n", k, v, t)

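The nil-logger guard above keeps the new pointer type safe to call; a minimal sketch (hypothetical helper name) of the same fallback:

package main

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// orDefault mirrors the check in NewClient above: a nil *slog.Logger is replaced
// with promslog's no-op logger so later Debug/Error calls cannot panic.
func orDefault(logger *slog.Logger) *slog.Logger {
	if logger == nil {
		return promslog.NewNopLogger()
	}
	return logger
}

func main() {
	logger := orDefault(nil)
	logger.Debug("Cannot send value to Graphite, skipping sample", "value", "NaN", "sample", "up")
}
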
View file

@ -17,22 +17,22 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"math" "math"
"os" "os"
"strings" "strings"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
influx "github.com/influxdata/influxdb/client/v2" influx "github.com/influxdata/influxdb/client/v2"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/prompb"
) )
// Client allows sending batches of Prometheus samples to InfluxDB. // Client allows sending batches of Prometheus samples to InfluxDB.
type Client struct { type Client struct {
logger log.Logger logger *slog.Logger
client influx.Client client influx.Client
database string database string
@ -41,16 +41,16 @@ type Client struct {
} }
// NewClient creates a new Client. // NewClient creates a new Client.
func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client { func NewClient(logger *slog.Logger, conf influx.HTTPConfig, db, rp string) *Client {
c, err := influx.NewHTTPClient(conf) c, err := influx.NewHTTPClient(conf)
// Currently influx.NewClient() *should* never return an error. // Currently influx.NewClient() *should* never return an error.
if err != nil { if err != nil {
level.Error(logger).Log("err", err) logger.Error("Error creating influx HTTP client", "err", err)
os.Exit(1) os.Exit(1)
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
return &Client{ return &Client{
@ -84,7 +84,7 @@ func (c *Client) Write(samples model.Samples) error {
for _, s := range samples { for _, s := range samples {
v := float64(s.Value) v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) { if math.IsNaN(v) || math.IsInf(v, 0) {
level.Debug(c.logger).Log("msg", "Cannot send to InfluxDB, skipping sample", "value", v, "sample", s) c.logger.Debug("Cannot send to InfluxDB, skipping sample", "value", v, "sample", s)
c.ignoredSamples.Inc() c.ignoredSamples.Inc()
continue continue
} }

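For comparison, a hypothetical constructor sketch (helper and field names are illustrative, not part of this commit) where the nil fallback runs before the logger is first used, since NewClient above can log an error during setup:

package main

import (
	"errors"
	"log/slog"

	"github.com/prometheus/common/promslog"
)

type client struct {
	logger *slog.Logger
}

func newClient(logger *slog.Logger, setup func() error) (*client, error) {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	if err := setup(); err != nil {
		// Safe even when the caller passed nil, because the fallback ran first.
		logger.Error("Error creating influx HTTP client", "err", err)
		return nil, err
	}
	return &client{logger: logger}, nil
}

func main() {
	if _, err := newClient(nil, func() error { return errors.New("bad config") }); err != nil {
		return
	}
}
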
View file

@ -17,6 +17,7 @@ package main
import ( import (
"fmt" "fmt"
"io" "io"
"log/slog"
"net/http" "net/http"
_ "net/http/pprof" _ "net/http/pprof"
"net/url" "net/url"
@ -26,16 +27,14 @@ import (
"time" "time"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/golang/snappy" "github.com/golang/snappy"
influx "github.com/influxdata/influxdb/client/v2" influx "github.com/influxdata/influxdb/client/v2"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promlog" "github.com/prometheus/common/promslog"
"github.com/prometheus/common/promlog/flag" "github.com/prometheus/common/promslog/flag"
"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/graphite"
"github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb" "github.com/prometheus/prometheus/documentation/examples/remote_storage/remote_storage_adapter/influxdb"
@ -57,7 +56,7 @@ type config struct {
remoteTimeout time.Duration remoteTimeout time.Duration
listenAddr string listenAddr string
telemetryPath string telemetryPath string
promlogConfig promlog.Config promslogConfig promslog.Config
} }
var ( var (
@ -105,11 +104,11 @@ func main() {
cfg := parseFlags() cfg := parseFlags()
http.Handle(cfg.telemetryPath, promhttp.Handler()) http.Handle(cfg.telemetryPath, promhttp.Handler())
logger := promlog.New(&cfg.promlogConfig) logger := promslog.New(&cfg.promslogConfig)
writers, readers := buildClients(logger, cfg) writers, readers := buildClients(logger, cfg)
if err := serve(logger, cfg.listenAddr, writers, readers); err != nil { if err := serve(logger, cfg.listenAddr, writers, readers); err != nil {
level.Error(logger).Log("msg", "Failed to listen", "addr", cfg.listenAddr, "err", err) logger.Error("Failed to listen", "addr", cfg.listenAddr, "err", err)
os.Exit(1) os.Exit(1)
} }
} }
@ -120,7 +119,7 @@ func parseFlags() *config {
cfg := &config{ cfg := &config{
influxdbPassword: os.Getenv("INFLUXDB_PW"), influxdbPassword: os.Getenv("INFLUXDB_PW"),
promlogConfig: promlog.Config{}, promslogConfig: promslog.Config{},
} }
a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty."). a.Flag("graphite-address", "The host:port of the Graphite server to send samples to. None, if empty.").
@ -146,7 +145,7 @@ func parseFlags() *config {
a.Flag("web.telemetry-path", "Address to listen on for web endpoints."). a.Flag("web.telemetry-path", "Address to listen on for web endpoints.").
Default("/metrics").StringVar(&cfg.telemetryPath) Default("/metrics").StringVar(&cfg.telemetryPath)
flag.AddFlags(a, &cfg.promlogConfig) flag.AddFlags(a, &cfg.promslogConfig)
_, err := a.Parse(os.Args[1:]) _, err := a.Parse(os.Args[1:])
if err != nil { if err != nil {
@ -168,19 +167,19 @@ type reader interface {
Name() string Name() string
} }
func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) { func buildClients(logger *slog.Logger, cfg *config) ([]writer, []reader) {
var writers []writer var writers []writer
var readers []reader var readers []reader
if cfg.graphiteAddress != "" { if cfg.graphiteAddress != "" {
c := graphite.NewClient( c := graphite.NewClient(
log.With(logger, "storage", "Graphite"), logger.With("storage", "Graphite"),
cfg.graphiteAddress, cfg.graphiteTransport, cfg.graphiteAddress, cfg.graphiteTransport,
cfg.remoteTimeout, cfg.graphitePrefix) cfg.remoteTimeout, cfg.graphitePrefix)
writers = append(writers, c) writers = append(writers, c)
} }
if cfg.opentsdbURL != "" { if cfg.opentsdbURL != "" {
c := opentsdb.NewClient( c := opentsdb.NewClient(
log.With(logger, "storage", "OpenTSDB"), logger.With("storage", "OpenTSDB"),
cfg.opentsdbURL, cfg.opentsdbURL,
cfg.remoteTimeout, cfg.remoteTimeout,
) )
@ -189,7 +188,7 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) {
if cfg.influxdbURL != "" { if cfg.influxdbURL != "" {
url, err := url.Parse(cfg.influxdbURL) url, err := url.Parse(cfg.influxdbURL)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err) logger.Error("Failed to parse InfluxDB URL", "url", cfg.influxdbURL, "err", err)
os.Exit(1) os.Exit(1)
} }
conf := influx.HTTPConfig{ conf := influx.HTTPConfig{
@ -199,7 +198,7 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) {
Timeout: cfg.remoteTimeout, Timeout: cfg.remoteTimeout,
} }
c := influxdb.NewClient( c := influxdb.NewClient(
log.With(logger, "storage", "InfluxDB"), logger.With("storage", "InfluxDB"),
conf, conf,
cfg.influxdbDatabase, cfg.influxdbDatabase,
cfg.influxdbRetentionPolicy, cfg.influxdbRetentionPolicy,
@ -208,15 +207,15 @@ func buildClients(logger log.Logger, cfg *config) ([]writer, []reader) {
writers = append(writers, c) writers = append(writers, c)
readers = append(readers, c) readers = append(readers, c)
} }
level.Info(logger).Log("msg", "Starting up...") logger.Info("Starting up...")
return writers, readers return writers, readers
} }
func serve(logger log.Logger, addr string, writers []writer, readers []reader) error { func serve(logger *slog.Logger, addr string, writers []writer, readers []reader) error {
http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) { http.HandleFunc("/write", func(w http.ResponseWriter, r *http.Request) {
req, err := remote.DecodeWriteRequest(r.Body) req, err := remote.DecodeWriteRequest(r.Body)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Read error", "err", err.Error()) logger.Error("Read error", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
@ -238,21 +237,21 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e
http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) { http.HandleFunc("/read", func(w http.ResponseWriter, r *http.Request) {
compressed, err := io.ReadAll(r.Body) compressed, err := io.ReadAll(r.Body)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Read error", "err", err.Error()) logger.Error("Read error", "err", err.Error())
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
reqBuf, err := snappy.Decode(nil, compressed) reqBuf, err := snappy.Decode(nil, compressed)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Decode error", "err", err.Error()) logger.Error("Decode error", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
} }
var req prompb.ReadRequest var req prompb.ReadRequest
if err := proto.Unmarshal(reqBuf, &req); err != nil { if err := proto.Unmarshal(reqBuf, &req); err != nil {
level.Error(logger).Log("msg", "Unmarshal error", "err", err.Error()) logger.Error("Unmarshal error", "err", err.Error())
http.Error(w, err.Error(), http.StatusBadRequest) http.Error(w, err.Error(), http.StatusBadRequest)
return return
} }
@ -267,7 +266,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e
var resp *prompb.ReadResponse var resp *prompb.ReadResponse
resp, err = reader.Read(&req) resp, err = reader.Read(&req)
if err != nil { if err != nil {
level.Warn(logger).Log("msg", "Error executing query", "query", req, "storage", reader.Name(), "err", err) logger.Warn("Error executing query", "query", req, "storage", reader.Name(), "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError) http.Error(w, err.Error(), http.StatusInternalServerError)
return return
} }
@ -283,7 +282,7 @@ func serve(logger log.Logger, addr string, writers []writer, readers []reader) e
compressed = snappy.Encode(nil, data) compressed = snappy.Encode(nil, data)
if _, err := w.Write(compressed); err != nil { if _, err := w.Write(compressed); err != nil {
level.Warn(logger).Log("msg", "Error writing response", "storage", reader.Name(), "err", err) logger.Warn("Error writing response", "storage", reader.Name(), "err", err)
} }
}) })
@ -309,12 +308,12 @@ func protoToSamples(req *prompb.WriteRequest) model.Samples {
return samples return samples
} }
func sendSamples(logger log.Logger, w writer, samples model.Samples) { func sendSamples(logger *slog.Logger, w writer, samples model.Samples) {
begin := time.Now() begin := time.Now()
err := w.Write(samples) err := w.Write(samples)
duration := time.Since(begin).Seconds() duration := time.Since(begin).Seconds()
if err != nil { if err != nil {
level.Warn(logger).Log("msg", "Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples)) logger.Warn("Error sending samples to remote storage", "err", err, "storage", w.Name(), "num_samples", len(samples))
failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) failedSamples.WithLabelValues(w.Name()).Add(float64(len(samples)))
} }
sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples))) sentSamples.WithLabelValues(w.Name()).Add(float64(len(samples)))

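A minimal sketch (not part of this diff) of the flag wiring the parseFlags/main changes above rely on: promslog/flag registers the logging flags on the kingpin app, and promslog.New turns the parsed Config into a *slog.Logger.

package main

import (
	"os"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus/common/promslog"
	promslogflag "github.com/prometheus/common/promslog/flag"
)

func main() {
	a := kingpin.New("example", "promslog flag wiring sketch")
	cfg := promslog.Config{}
	// Registers the log.level/log.format flags against cfg, as in parseFlags above.
	promslogflag.AddFlags(a, &cfg)
	kingpin.MustParse(a.Parse(os.Args[1:]))

	logger := promslog.New(&cfg)
	logger.Info("Starting up...")
}
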
View file

@ -19,13 +19,12 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"math" "math"
"net/http" "net/http"
"net/url" "net/url"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
) )
@ -36,14 +35,14 @@ const (
// Client allows sending batches of Prometheus samples to OpenTSDB. // Client allows sending batches of Prometheus samples to OpenTSDB.
type Client struct { type Client struct {
logger log.Logger logger *slog.Logger
url string url string
timeout time.Duration timeout time.Duration
} }
// NewClient creates a new Client. // NewClient creates a new Client.
func NewClient(logger log.Logger, url string, timeout time.Duration) *Client { func NewClient(logger *slog.Logger, url string, timeout time.Duration) *Client {
return &Client{ return &Client{
logger: logger, logger: logger,
url: url, url: url,
@ -78,7 +77,7 @@ func (c *Client) Write(samples model.Samples) error {
for _, s := range samples { for _, s := range samples {
v := float64(s.Value) v := float64(s.Value)
if math.IsNaN(v) || math.IsInf(v, 0) { if math.IsNaN(v) || math.IsInf(v, 0) {
level.Debug(c.logger).Log("msg", "Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s) c.logger.Debug("Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", s)
continue continue
} }
metric := TagValue(s.Metric[model.MetricNameLabel]) metric := TagValue(s.Metric[model.MetricNameLabel])

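The converted Debug call above is filtered by the handler's level; a stdlib-only sketch (illustrative values) showing it only emits when the handler is configured for debug:

package main

import (
	"log/slog"
	"math"
	"os"
)

func main() {
	// At the default Info level this record would be dropped, so the handler
	// is created with Level set to Debug.
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
	v := math.NaN()
	logger.Debug("Cannot send value to OpenTSDB, skipping sample", "value", v, "sample", "up")
}
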
go.mod
View file

@ -24,8 +24,6 @@ require (
github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/envoyproxy/protoc-gen-validate v1.1.0
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb
github.com/fsnotify/fsnotify v1.7.0 github.com/fsnotify/fsnotify v1.7.0
github.com/go-kit/log v0.2.1
github.com/go-logfmt/logfmt v0.6.0
github.com/go-openapi/strfmt v0.23.0 github.com/go-openapi/strfmt v0.23.0
github.com/go-zookeeper/zk v1.0.4 github.com/go-zookeeper/zk v1.0.4
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
@ -54,10 +52,10 @@ require (
github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/alertmanager v0.27.0
github.com/prometheus/client_golang v1.20.4 github.com/prometheus/client_golang v1.20.4
github.com/prometheus/client_model v0.6.1 github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.59.1 github.com/prometheus/common v0.60.0
github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/assets v0.2.0
github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/common/sigv4 v0.1.0
github.com/prometheus/exporter-toolkit v0.12.0 github.com/prometheus/exporter-toolkit v0.13.0
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
@ -79,7 +77,6 @@ require (
golang.org/x/sync v0.8.0 golang.org/x/sync v0.8.0
golang.org/x/sys v0.26.0 golang.org/x/sys v0.26.0
golang.org/x/text v0.19.0 golang.org/x/text v0.19.0
golang.org/x/time v0.6.0
golang.org/x/tools v0.26.0 golang.org/x/tools v0.26.0
google.golang.org/api v0.199.0 google.golang.org/api v0.199.0
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1
@ -119,7 +116,6 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-kit/kit v0.12.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.22.2 // indirect github.com/go-openapi/analysis v0.22.2 // indirect
@ -195,6 +191,7 @@ require (
golang.org/x/mod v0.21.0 // indirect golang.org/x/mod v0.21.0 // indirect
golang.org/x/net v0.30.0 // indirect golang.org/x/net v0.30.0 // indirect
golang.org/x/term v0.25.0 // indirect golang.org/x/term v0.25.0 // indirect
golang.org/x/time v0.6.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
@ -207,11 +204,6 @@ require (
sigs.k8s.io/yaml v1.4.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect
) )
replace (
k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0
k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.5.0
)
// Exclude linodego v1.0.0 as it is no longer published on github. // Exclude linodego v1.0.0 as it is no longer published on github.
exclude github.com/linode/linodego v1.0.0 exclude github.com/linode/linodego v1.0.0

go.sum
View file

@ -61,13 +61,8 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8= github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY= github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@ -78,23 +73,17 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -102,8 +91,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@ -117,24 +104,16 @@ github.com/cilium/ebpf v0.11.0 h1:V8gS/bTCCjX9uUnkUFUpPsksM8n1lXBAvHcpiFk1X2Y=
github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs= github.com/cilium/ebpf v0.11.0/go.mod h1:WE7CZAnqOL2RouJ4f1uyNhqr2P4CCvXFIqdRDUgWsVs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg=
github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0= github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -142,7 +121,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg= github.com/digitalocean/godo v1.122.0 h1:ziytLQi8QKtDp2K1A+YrYl2dWLHLh2uaMzWvcz9HkKg=
github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= github.com/digitalocean/godo v1.122.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
@ -155,16 +133,10 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@ -182,11 +154,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
@ -198,17 +167,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -234,7 +197,6 @@ github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQ
github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE=
github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@ -242,10 +204,7 @@ github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I
github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
@ -253,7 +212,6 @@ github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -282,7 +240,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -324,7 +281,6 @@ github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@ -336,28 +292,18 @@ github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDP
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw=
github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE= github.com/hashicorp/consul/api v1.29.4 h1:P6slzxDLBOxUSj3fWo2o65VuKtbtOXFi7TSSgtXutuE=
github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg= github.com/hashicorp/consul/api v1.29.4/go.mod h1:HUlfw+l2Zy68ceJavv2zAyArl2fqhGWnMycyt56sBgg=
github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0= github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg= github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
@ -366,7 +312,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
@ -384,7 +329,6 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -395,51 +339,38 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ=
github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY= github.com/ionos-cloud/sdk-go/v6 v6.2.1 h1:mxxN+frNVmbFrmmFfXnBC3g2USYJrl6mc1LW2iNYbFY=
github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI= github.com/ionos-cloud/sdk-go/v6 v6.2.1/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -447,11 +378,9 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
@@ -470,11 +399,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY=
github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/linode/linodego v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -485,7 +411,6 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
@@ -493,7 +418,6 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g=
github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM= github.com/maxatome/go-testdeep v1.12.0/go.mod h1:lPZc/HAcJMP92l7yI6TRz1aZN5URwUBUAfUNvrclaNM=
@@ -501,23 +425,16 @@ github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -538,64 +455,35 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 h1:dOYG7LS/WK00RWZc8XGgcUTlTxpp3mKhdR2Q9z9HbXM=
github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -608,54 +496,43 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P
github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I=
github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM=
github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/assets v0.2.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI=
github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
github.com/prometheus/exporter-toolkit v0.12.0 h1:DkE5RcEZR3lQA2QD5JLVQIf41dFKNsVMXFhgqcif7fo= github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c=
github.com/prometheus/exporter-toolkit v0.12.0/go.mod h1:fQH0KtTn0yrrS0S82kqppRjDDiwMfIQUwT+RBRRhwUc= github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770=
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
@@ -664,28 +541,14 @@ github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY=
github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M= github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/simonpasquier/klog-gokit v0.3.0 h1:TkFK21cbwDRS+CiystjqbAiq5ubJcVTk9hLUck5Ntcs=
github.com/simonpasquier/klog-gokit v0.3.0/go.mod h1:+SUlDQNrhVtGt2FieaqNftzzk8P72zpWlACateWxA9k=
github.com/simonpasquier/klog-gokit/v3 v3.5.0 h1:ewnk+ickph0hkQFgdI4pffKIbruAxxWcg0Fe/vQmLOM=
github.com/simonpasquier/klog-gokit/v3 v3.5.0/go.mod h1:S9flvRzzpaYLYtXI2w8jf9R/IU/Cy14NrbvDUevNP1E=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -703,28 +566,20 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -754,27 +609,18 @@ go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8d
go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -822,13 +668,8 @@ golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -839,7 +680,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -893,11 +733,7 @@ golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -908,13 +744,11 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -978,22 +812,18 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@ -1005,8 +835,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -1014,7 +842,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@ -1041,7 +868,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@ -1059,7 +885,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@ -1070,7 +895,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@ -1098,15 +922,10 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@ -1137,20 +956,13 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1166,7 +978,6 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -1180,6 +991,10 @@ k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
@ -1191,7 +1006,5 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=

View file

@ -19,19 +19,19 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
"sync" "sync"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/go-openapi/strfmt" "github.com/go-openapi/strfmt"
"github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/sigv4" "github.com/prometheus/common/sigv4"
"github.com/prometheus/common/version" "github.com/prometheus/common/version"
"go.uber.org/atomic" "go.uber.org/atomic"
@ -117,7 +117,7 @@ type Manager struct {
stopRequested chan struct{} stopRequested chan struct{}
alertmanagers map[string]*alertmanagerSet alertmanagers map[string]*alertmanagerSet
logger log.Logger logger *slog.Logger
} }
// Options are the configurable parameters of a Handler. // Options are the configurable parameters of a Handler.
@ -218,12 +218,12 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp
} }
// NewManager is the manager constructor. // NewManager is the manager constructor.
func NewManager(o *Options, logger log.Logger) *Manager { func NewManager(o *Options, logger *slog.Logger) *Manager {
if o.Do == nil { if o.Do == nil {
o.Do = do o.Do = do
} }
if logger == nil { if logger == nil {
logger = log.NewNopLogger() logger = promslog.NewNopLogger()
} }
n := &Manager{ n := &Manager{
@ -319,7 +319,7 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
}() }()
wg.Wait() wg.Wait()
level.Info(n.logger).Log("msg", "Notification manager stopped") n.logger.Info("Notification manager stopped")
} }
// sendLoop continuously consumes the notifications queue and sends alerts to // sendLoop continuously consumes the notifications queue and sends alerts to
@ -376,20 +376,20 @@ func (n *Manager) sendOneBatch() {
func (n *Manager) drainQueue() { func (n *Manager) drainQueue() {
if !n.opts.DrainOnShutdown { if !n.opts.DrainOnShutdown {
if n.queueLen() > 0 { if n.queueLen() > 0 {
level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen())
n.metrics.dropped.Add(float64(n.queueLen())) n.metrics.dropped.Add(float64(n.queueLen()))
} }
return return
} }
level.Info(n.logger).Log("msg", "Draining any remaining notifications...") n.logger.Info("Draining any remaining notifications...")
for n.queueLen() > 0 { for n.queueLen() > 0 {
n.sendOneBatch() n.sendOneBatch()
} }
level.Info(n.logger).Log("msg", "Remaining notifications drained") n.logger.Info("Remaining notifications drained")
} }
func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
@ -399,7 +399,7 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
for id, tgroup := range tgs { for id, tgroup := range tgs {
am, ok := n.alertmanagers[id] am, ok := n.alertmanagers[id]
if !ok { if !ok {
level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id))
continue continue
} }
am.sync(tgroup) am.sync(tgroup)
@ -422,7 +422,7 @@ func (n *Manager) Send(alerts ...*Alert) {
if d := len(alerts) - n.opts.QueueCapacity; d > 0 { if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
alerts = alerts[d:] alerts = alerts[d:]
level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d)
n.metrics.dropped.Add(float64(d)) n.metrics.dropped.Add(float64(d))
} }
@ -431,7 +431,7 @@ func (n *Manager) Send(alerts ...*Alert) {
if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 { if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
n.queue = n.queue[d:] n.queue = n.queue[d:]
level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d) n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d)
n.metrics.dropped.Add(float64(d)) n.metrics.dropped.Add(float64(d))
} }
n.queue = append(n.queue, alerts...) n.queue = append(n.queue, alerts...)
@ -562,7 +562,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
if v1Payload == nil { if v1Payload == nil {
v1Payload, err = json.Marshal(amAlerts) v1Payload, err = json.Marshal(amAlerts)
if err != nil { if err != nil {
level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) n.logger.Error("Encoding alerts for Alertmanager API v1 failed", "err", err)
ams.mtx.RUnlock() ams.mtx.RUnlock()
return false return false
} }
@ -577,7 +577,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
v2Payload, err = json.Marshal(openAPIAlerts) v2Payload, err = json.Marshal(openAPIAlerts)
if err != nil { if err != nil {
level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err) n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err)
ams.mtx.RUnlock() ams.mtx.RUnlock()
return false return false
} }
@ -587,8 +587,8 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
} }
default: default:
{ {
level.Error(n.logger).Log( n.logger.Error(
"msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
"err", err, "err", err,
) )
ams.mtx.RUnlock() ams.mtx.RUnlock()
@ -609,7 +609,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) {
if err := n.sendOne(ctx, client, url, payload); err != nil { if err := n.sendOne(ctx, client, url, payload); err != nil {
level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err)
n.metrics.errors.WithLabelValues(url).Inc() n.metrics.errors.WithLabelValues(url).Inc()
} else { } else {
numSuccess.Inc() numSuccess.Inc()
@ -689,7 +689,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b
// //
// Stop is safe to call multiple times. // Stop is safe to call multiple times.
func (n *Manager) Stop() { func (n *Manager) Stop() {
level.Info(n.logger).Log("msg", "Stopping notification manager...") n.logger.Info("Stopping notification manager...")
n.stopOnce.Do(func() { n.stopOnce.Do(func() {
close(n.stopRequested) close(n.stopRequested)
@ -724,10 +724,10 @@ type alertmanagerSet struct {
mtx sync.RWMutex mtx sync.RWMutex
ams []alertmanager ams []alertmanager
droppedAms []alertmanager droppedAms []alertmanager
logger log.Logger logger *slog.Logger
} }
func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager")
if err != nil { if err != nil {
return nil, err return nil, err
@ -761,7 +761,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
for _, tg := range tgs { for _, tg := range tgs {
ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg)
if err != nil { if err != nil {
level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) s.logger.Error("Creating discovered Alertmanagers failed", "err", err)
continue continue
} }
allAms = append(allAms, ams...) allAms = append(allAms, ams...)
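
For context, a minimal usage sketch of the converted notifier API. Only NewManager, Options.QueueCapacity, promslog.New, and promslog.NewNopLogger appear in this diff; everything else here is illustrative.

```go
package main

import (
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/notifier"
)

func newNotifierManager() *notifier.Manager {
	// promslog.New returns a ready-to-use *slog.Logger; an empty Config is
	// assumed to fall back to the package defaults.
	logger := promslog.New(&promslog.Config{})

	// Passing nil instead of logger would also work: NewManager now
	// substitutes promslog.NewNopLogger() when no logger is provided.
	return notifier.NewManager(&notifier.Options{QueueCapacity: 10000}, logger)
}
```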

View file

@ -26,11 +26,11 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config" config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/atomic" "go.uber.org/atomic"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -751,7 +751,7 @@ func TestHangingNotifier(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
sdManager := discovery.NewManager( sdManager := discovery.NewManager(
ctx, ctx,
log.NewNopLogger(), promslog.NewNopLogger(),
reg, reg,
sdMetrics, sdMetrics,
discovery.Name("sd-manager"), discovery.Name("sd-manager"),

View file

@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"math" "math"
"reflect" "reflect"
"runtime" "runtime"
@ -30,10 +31,9 @@ import (
"sync" "sync"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
@ -125,7 +125,11 @@ type QueryEngine interface {
// QueryLogger is an interface that can be used to log all the queries logged // QueryLogger is an interface that can be used to log all the queries logged
// by the engine. // by the engine.
type QueryLogger interface { type QueryLogger interface {
Log(...interface{}) error Error(msg string, args ...any)
Info(msg string, args ...any)
Debug(msg string, args ...any)
Warn(msg string, args ...any)
With(args ...any)
Close() error Close() error
} }
@ -288,7 +292,7 @@ type QueryTracker interface {
// EngineOpts contains configuration options used when creating a new Engine. // EngineOpts contains configuration options used when creating a new Engine.
type EngineOpts struct { type EngineOpts struct {
Logger log.Logger Logger *slog.Logger
Reg prometheus.Registerer Reg prometheus.Registerer
MaxSamples int MaxSamples int
Timeout time.Duration Timeout time.Duration
@ -326,7 +330,7 @@ type EngineOpts struct {
// Engine handles the lifetime of queries from beginning to end. // Engine handles the lifetime of queries from beginning to end.
// It is connected to a querier. // It is connected to a querier.
type Engine struct { type Engine struct {
logger log.Logger logger *slog.Logger
metrics *engineMetrics metrics *engineMetrics
timeout time.Duration timeout time.Duration
maxSamplesPerQuery int maxSamplesPerQuery int
@ -344,7 +348,7 @@ type Engine struct {
// NewEngine returns a new engine. // NewEngine returns a new engine.
func NewEngine(opts EngineOpts) *Engine { func NewEngine(opts EngineOpts) *Engine {
if opts.Logger == nil { if opts.Logger == nil {
opts.Logger = log.NewNopLogger() opts.Logger = promslog.NewNopLogger()
} }
queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{ queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{
@ -403,7 +407,7 @@ func NewEngine(opts EngineOpts) *Engine {
if opts.LookbackDelta == 0 { if opts.LookbackDelta == 0 {
opts.LookbackDelta = defaultLookbackDelta opts.LookbackDelta = defaultLookbackDelta
if l := opts.Logger; l != nil { if l := opts.Logger; l != nil {
level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) l.Debug("Lookback delta is zero, setting to default value", "value", defaultLookbackDelta)
} }
} }
@ -455,7 +459,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) {
// not make reload fail; only log a warning. // not make reload fail; only log a warning.
err := ng.queryLogger.Close() err := ng.queryLogger.Close()
if err != nil { if err != nil {
level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err) ng.logger.Warn("Error while closing the previous query log file", "err", err)
} }
} }
@ -632,23 +636,23 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota
// The step provided by the user is in seconds. // The step provided by the user is in seconds.
params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond))
} }
f := []interface{}{"params", params} l.With("params", params)
if err != nil { if err != nil {
f = append(f, "error", err) l.With("error", err)
} }
f = append(f, "stats", stats.NewQueryStats(q.Stats())) l.With("stats", stats.NewQueryStats(q.Stats()))
if span := trace.SpanFromContext(ctx); span != nil { if span := trace.SpanFromContext(ctx); span != nil {
f = append(f, "spanID", span.SpanContext().SpanID()) l.With("spanID", span.SpanContext().SpanID())
} }
if origin := ctx.Value(QueryOrigin{}); origin != nil { if origin := ctx.Value(QueryOrigin{}); origin != nil {
for k, v := range origin.(map[string]interface{}) { for k, v := range origin.(map[string]interface{}) {
f = append(f, k, v) l.With(k, v)
} }
} }
if err := l.Log(f...); err != nil { l.Info("promql query logged")
ng.metrics.queryLogFailures.Inc() // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors?
level.Error(ng.logger).Log("msg", "can't log query", "err", err) // ng.metrics.queryLogFailures.Inc()
} // ng.logger.Error("can't log query", "err", err)
} }
ng.queryLoggerLock.RUnlock() ng.queryLoggerLock.RUnlock()
}() }()
@ -1059,7 +1063,7 @@ type evaluator struct {
maxSamples int maxSamples int
currentSamples int currentSamples int
logger log.Logger logger *slog.Logger
lookbackDelta time.Duration lookbackDelta time.Duration
samplesStats *stats.QuerySamples samplesStats *stats.QuerySamples
noStepSubqueryIntervalFn func(rangeMillis int64) int64 noStepSubqueryIntervalFn func(rangeMillis int64) int64
@ -1089,7 +1093,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp
buf := make([]byte, 64<<10) buf := make([]byte, 64<<10)
buf = buf[:runtime.Stack(buf, false)] buf = buf[:runtime.Stack(buf, false)]
level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) ev.logger.Error("runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf))
*errp = fmt.Errorf("unexpected error: %w", err) *errp = fmt.Errorf("unexpected error: %w", err)
case errWithWarnings: case errWithWarnings:
*errp = err.err *errp = err.err
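
A minimal sketch of how a *slog.Logger could be adapted to the reworked promql.QueryLogger interface above; the struct and field names are illustrative, not the adapter Prometheus itself uses.

```go
package main

import (
	"io"
	"log/slog"
)

// slogQueryLogger is a hypothetical adapter satisfying promql.QueryLogger.
type slogQueryLogger struct {
	logger *slog.Logger
	closer io.Closer // e.g. an underlying query log file, if any
}

func (q *slogQueryLogger) Info(msg string, args ...any)  { q.logger.Info(msg, args...) }
func (q *slogQueryLogger) Warn(msg string, args ...any)  { q.logger.Warn(msg, args...) }
func (q *slogQueryLogger) Error(msg string, args ...any) { q.logger.Error(msg, args...) }
func (q *slogQueryLogger) Debug(msg string, args ...any) { q.logger.Debug(msg, args...) }

// With has no return value in the interface, while slog.Logger.With returns a
// new logger, so the adapter swaps its own logger in place.
func (q *slogQueryLogger) With(args ...any) { q.logger = q.logger.With(args...) }

func (q *slogQueryLogger) Close() error {
	if q.closer == nil {
		return nil
	}
	return q.closer.Close()
}
```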

View file

@ -14,22 +14,21 @@
package promql package promql
import ( import (
"bytes"
"errors" "errors"
"testing" "testing"
"github.com/go-kit/log"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/annotations"
) )
func TestRecoverEvaluatorRuntime(t *testing.T) { func TestRecoverEvaluatorRuntime(t *testing.T) {
var output []interface{} var output bytes.Buffer
logger := log.Logger(log.LoggerFunc(func(keyvals ...interface{}) error { logger := promslog.New(&promslog.Config{Writer: &output})
output = append(output, keyvals...)
return nil
}))
ev := &evaluator{logger: logger} ev := &evaluator{logger: logger}
expr, _ := parser.ParseExpr("sum(up)") expr, _ := parser.ParseExpr("sum(up)")
@ -38,7 +37,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
defer func() { defer func() {
require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0") require.EqualError(t, err, "unexpected error: runtime error: index out of range [123] with length 0")
require.Contains(t, output, "sum(up)") require.Contains(t, output.String(), "sum(up)")
}() }()
defer ev.recover(expr, nil, &err) defer ev.recover(expr, nil, &err)
@ -48,7 +47,7 @@ func TestRecoverEvaluatorRuntime(t *testing.T) {
} }
func TestRecoverEvaluatorError(t *testing.T) { func TestRecoverEvaluatorError(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()} ev := &evaluator{logger: promslog.NewNopLogger()}
var err error var err error
e := errors.New("custom error") e := errors.New("custom error")
@ -62,7 +61,7 @@ func TestRecoverEvaluatorError(t *testing.T) {
} }
func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) { func TestRecoverEvaluatorErrorWithWarnings(t *testing.T) {
ev := &evaluator{logger: log.NewNopLogger()} ev := &evaluator{logger: promslog.NewNopLogger()}
var err error var err error
var ws annotations.Annotations var ws annotations.Annotations

View file

@ -2018,23 +2018,58 @@ func TestSubquerySelector(t *testing.T) {
type FakeQueryLogger struct { type FakeQueryLogger struct {
closed bool closed bool
logs []interface{} logs []interface{}
attrs []any
} }
func NewFakeQueryLogger() *FakeQueryLogger { func NewFakeQueryLogger() *FakeQueryLogger {
return &FakeQueryLogger{ return &FakeQueryLogger{
closed: false, closed: false,
logs: make([]interface{}, 0), logs: make([]interface{}, 0),
attrs: make([]any, 0),
} }
} }
// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Close() error { func (f *FakeQueryLogger) Close() error {
f.closed = true f.closed = true
return nil return nil
} }
func (f *FakeQueryLogger) Log(l ...interface{}) error { // It implements the promql.QueryLogger interface.
f.logs = append(f.logs, l...) func (f *FakeQueryLogger) Info(msg string, args ...any) {
return nil log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}
// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Error(msg string, args ...any) {
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}
// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Warn(msg string, args ...any) {
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}
// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) Debug(msg string, args ...any) {
log := append([]any{msg}, args...)
log = append(log, f.attrs...)
f.attrs = f.attrs[:0]
f.logs = append(f.logs, log...)
}
// It implements the promql.QueryLogger interface.
func (f *FakeQueryLogger) With(args ...any) {
f.attrs = append(f.attrs, args...)
} }
func TestQueryLogger_basic(t *testing.T) { func TestQueryLogger_basic(t *testing.T) {
@ -2062,9 +2097,8 @@ func TestQueryLogger_basic(t *testing.T) {
f1 := NewFakeQueryLogger() f1 := NewFakeQueryLogger()
engine.SetQueryLogger(f1) engine.SetQueryLogger(f1)
queryExec() queryExec()
for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}} { require.Contains(t, f1.logs, `params`)
require.Equal(t, field, f1.logs[i]) require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"})
}
l := len(f1.logs) l := len(f1.logs)
queryExec() queryExec()
@ -2110,11 +2144,8 @@ func TestQueryLogger_fields(t *testing.T) {
res := query.Exec(ctx) res := query.Exec(ctx)
require.NoError(t, res.Err) require.NoError(t, res.Err)
expected := []string{"foo", "bar"} require.Contains(t, f1.logs, `foo`)
for i, field := range expected { require.Contains(t, f1.logs, `bar`)
v := f1.logs[len(f1.logs)-len(expected)+i].(string)
require.Equal(t, field, v)
}
} }
func TestQueryLogger_error(t *testing.T) { func TestQueryLogger_error(t *testing.T) {
@ -2140,9 +2171,10 @@ func TestQueryLogger_error(t *testing.T) {
res := query.Exec(ctx) res := query.Exec(ctx)
require.Error(t, res.Err, "query should have failed") require.Error(t, res.Err, "query should have failed")
for i, field := range []interface{}{"params", map[string]interface{}{"query": "test statement"}, "error", testErr} { require.Contains(t, f1.logs, `params`)
require.Equal(t, f1.logs[i], field) require.Contains(t, f1.logs, map[string]interface{}{"query": "test statement"})
} require.Contains(t, f1.logs, `error`)
require.Contains(t, f1.logs, testErr)
} }
func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) { func TestPreprocessAndWrapWithStepInvariantExpr(t *testing.T) {

View file

@ -19,6 +19,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
@ -26,14 +27,12 @@ import (
"unicode/utf8" "unicode/utf8"
"github.com/edsrzf/mmap-go" "github.com/edsrzf/mmap-go"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
) )
type ActiveQueryTracker struct { type ActiveQueryTracker struct {
mmappedFile []byte mmappedFile []byte
getNextIndex chan int getNextIndex chan int
logger log.Logger logger *slog.Logger
closer io.Closer closer io.Closer
maxConcurrent int maxConcurrent int
} }
@ -63,11 +62,11 @@ func parseBrokenJSON(brokenJSON []byte) (string, bool) {
return queries, true return queries, true
} }
func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { func logUnfinishedQueries(filename string, filesize int, logger *slog.Logger) {
if _, err := os.Stat(filename); err == nil { if _, err := os.Stat(filename); err == nil {
fd, err := os.Open(filename) fd, err := os.Open(filename)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to open query log file", "err", err) logger.Error("Failed to open query log file", "err", err)
return return
} }
defer fd.Close() defer fd.Close()
@ -75,7 +74,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
brokenJSON := make([]byte, filesize) brokenJSON := make([]byte, filesize)
_, err = fd.Read(brokenJSON) _, err = fd.Read(brokenJSON)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to read query log file", "err", err) logger.Error("Failed to read query log file", "err", err)
return return
} }
@ -83,7 +82,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
if !queriesExist { if !queriesExist {
return return
} }
level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries) logger.Info("These queries didn't finish in prometheus' last run:", "queries", queries)
} }
} }
@ -104,38 +103,38 @@ func (f *mmappedFile) Close() error {
return err return err
} }
func getMMappedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { func getMMappedFile(filename string, filesize int, logger *slog.Logger) ([]byte, io.Closer, error) {
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666)
if err != nil { if err != nil {
absPath, pathErr := filepath.Abs(filename) absPath, pathErr := filepath.Abs(filename)
if pathErr != nil { if pathErr != nil {
absPath = filename absPath = filename
} }
level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) logger.Error("Error opening query log file", "file", absPath, "err", err)
return nil, nil, err return nil, nil, err
} }
err = file.Truncate(int64(filesize)) err = file.Truncate(int64(filesize))
if err != nil { if err != nil {
file.Close() file.Close()
level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) logger.Error("Error setting filesize.", "filesize", filesize, "err", err)
return nil, nil, err return nil, nil, err
} }
fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0)
if err != nil { if err != nil {
file.Close() file.Close()
level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) logger.Error("Failed to mmap", "file", filename, "Attempted size", filesize, "err", err)
return nil, nil, err return nil, nil, err
} }
return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err
} }
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger *slog.Logger) *ActiveQueryTracker {
err := os.MkdirAll(localStoragePath, 0o777) err := os.MkdirAll(localStoragePath, 0o777)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Failed to create directory for logging active queries") logger.Error("Failed to create directory for logging active queries")
} }
filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
@ -174,18 +173,18 @@ func trimStringByBytes(str string, size int) string {
return string(bytesStr[:trimIndex]) return string(bytesStr[:trimIndex])
} }
func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { func _newJSONEntry(query string, timestamp int64, logger *slog.Logger) []byte {
entry := Entry{query, timestamp} entry := Entry{query, timestamp}
jsonEntry, err := json.Marshal(entry) jsonEntry, err := json.Marshal(entry)
if err != nil { if err != nil {
level.Error(logger).Log("msg", "Cannot create json of query", "query", query) logger.Error("Cannot create json of query", "query", query)
return []byte{} return []byte{}
} }
return jsonEntry return jsonEntry
} }
func newJSONEntry(query string, logger log.Logger) []byte { func newJSONEntry(query string, logger *slog.Logger) []byte {
timestamp := time.Now().Unix() timestamp := time.Now().Unix()
minEntryJSON := _newJSONEntry("", timestamp, logger) minEntryJSON := _newJSONEntry("", timestamp, logger)
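
A short usage sketch of the converted active-query tracker; only the NewActiveQueryTracker signature and the promslog helper come from this diff, the path and concurrency value are placeholders.

```go
package main

import (
	"github.com/prometheus/common/promslog"

	"github.com/prometheus/prometheus/promql"
)

func newTracker(dataDir string) *promql.ActiveQueryTracker {
	// The tracker logs its own file/mmap errors through the provided
	// *slog.Logger; a nop logger keeps tests and tooling quiet.
	return promql.NewActiveQueryTracker(dataDir, 20, promslog.NewNopLogger())
}
```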

View file

@ -16,13 +16,12 @@ package rules
import ( import (
"context" "context"
"fmt" "fmt"
"log/slog"
"net/url" "net/url"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"go.uber.org/atomic" "go.uber.org/atomic"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -141,7 +140,7 @@ type AlertingRule struct {
// the fingerprint of the labelset they correspond to. // the fingerprint of the labelset they correspond to.
active map[uint64]*Alert active map[uint64]*Alert
logger log.Logger logger *slog.Logger
noDependentRules *atomic.Bool noDependentRules *atomic.Bool
noDependencyRules *atomic.Bool noDependencyRules *atomic.Bool
@ -151,7 +150,7 @@ type AlertingRule struct {
func NewAlertingRule( func NewAlertingRule(
name string, vec parser.Expr, hold, keepFiringFor time.Duration, name string, vec parser.Expr, hold, keepFiringFor time.Duration,
labels, annotations, externalLabels labels.Labels, externalURL string, labels, annotations, externalLabels labels.Labels, externalURL string,
restored bool, logger log.Logger, restored bool, logger *slog.Logger,
) *AlertingRule { ) *AlertingRule {
el := externalLabels.Map() el := externalLabels.Map()
@ -381,7 +380,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t
result, err := tmpl.Expand() result, err := tmpl.Expand()
if err != nil { if err != nil {
result = fmt.Sprintf("<error expanding template: %s>", err) result = fmt.Sprintf("<error expanding template: %s>", err)
level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData) r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData)
} }
return result return result
} }

View file

@ -19,8 +19,8 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/histogram"
@ -276,7 +276,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
ruleWithExternalLabels := NewAlertingRule( ruleWithExternalLabels := NewAlertingRule(
"ExternalLabelExists", "ExternalLabelExists",
@ -287,7 +287,7 @@ func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.FromStrings("foo", "bar", "dings", "bums"), labels.FromStrings("foo", "bar", "dings", "bums"),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
result := promql.Vector{ result := promql.Vector{
promql.Sample{ promql.Sample{
@ -371,7 +371,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
ruleWithExternalURL := NewAlertingRule( ruleWithExternalURL := NewAlertingRule(
"ExternalURLExists", "ExternalURLExists",
@ -382,7 +382,7 @@ func TestAlertingRuleExternalURLInTemplate(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"http://localhost:1234", "http://localhost:1234",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
result := promql.Vector{ result := promql.Vector{
promql.Sample{ promql.Sample{
@ -466,7 +466,7 @@ func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
result := promql.Vector{ result := promql.Vector{
promql.Sample{ promql.Sample{
@ -527,7 +527,7 @@ instance: {{ $v.Labels.instance }}, value: {{ printf "%.0f" $v.Value }};
`), `),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
@ -607,7 +607,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
_, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0) _, err := rule.Eval(ctx, 0, now, EngineQueryFunc(engine, storage), nil, 0)
require.Error(t, err) require.Error(t, err)
@ -651,7 +651,7 @@ func TestAlertingRuleLimit(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
evalTime := time.Unix(0, 0) evalTime := time.Unix(0, 0)
@ -779,7 +779,7 @@ func TestSendAlertsDontAffectActiveAlerts(t *testing.T) {
}, },
}, },
} }
nm := notifier.NewManager(&opts, log.NewNopLogger()) nm := notifier.NewManager(&opts, promslog.NewNopLogger())
f := SendAlerts(nm, "") f := SendAlerts(nm, "")
notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) { notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) {
@ -986,7 +986,7 @@ func TestAlertingEvalWithOrigin(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
_, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) { _, err = rule.Eval(ctx, 0, now, func(ctx context.Context, qs string, _ time.Time) (promql.Vector, error) {
@ -1008,7 +1008,7 @@ func TestAlertingRule_SetNoDependentRules(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
require.False(t, rule.NoDependentRules()) require.False(t, rule.NoDependentRules())
@ -1029,7 +1029,7 @@ func TestAlertingRule_SetNoDependencyRules(t *testing.T) {
labels.EmptyLabels(), labels.EmptyLabels(),
labels.EmptyLabels(), labels.EmptyLabels(),
"", "",
true, log.NewNopLogger(), true, promslog.NewNopLogger(),
) )
require.False(t, rule.NoDependencyRules()) require.False(t, rule.NoDependencyRules())

View file

@ -16,6 +16,7 @@ package rules
import ( import (
"context" "context"
"errors" "errors"
"log/slog"
"math" "math"
"slices" "slices"
"strings" "strings"
@ -26,10 +27,9 @@ import (
"github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/promql/parser"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
@ -65,7 +65,7 @@ type Group struct {
terminated chan struct{} terminated chan struct{}
managerDone chan struct{} managerDone chan struct{}
logger log.Logger logger *slog.Logger
metrics *Metrics metrics *Metrics
@ -124,6 +124,10 @@ func NewGroup(o GroupOptions) *Group {
concurrencyController = sequentialRuleEvalController{} concurrencyController = sequentialRuleEvalController{}
} }
if o.Opts.Logger == nil {
o.Opts.Logger = promslog.NewNopLogger()
}
return &Group{ return &Group{
name: o.Name, name: o.Name,
file: o.File, file: o.File,
@ -137,7 +141,7 @@ func NewGroup(o GroupOptions) *Group {
done: make(chan struct{}), done: make(chan struct{}),
managerDone: o.done, managerDone: o.done,
terminated: make(chan struct{}), terminated: make(chan struct{}),
logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), logger: o.Opts.Logger.With("file", o.File, "group", o.Name),
metrics: metrics, metrics: metrics,
evalIterationFunc: evalIterationFunc, evalIterationFunc: evalIterationFunc,
concurrencyController: concurrencyController, concurrencyController: concurrencyController,
@ -200,7 +204,7 @@ func (g *Group) Interval() time.Duration { return g.interval }
// Limit returns the group's limit. // Limit returns the group's limit.
func (g *Group) Limit() int { return g.limit } func (g *Group) Limit() int { return g.limit }
func (g *Group) Logger() log.Logger { return g.logger } func (g *Group) Logger() *slog.Logger { return g.logger }
func (g *Group) run(ctx context.Context) { func (g *Group) run(ctx context.Context) {
defer close(g.terminated) defer close(g.terminated)
@ -272,7 +276,7 @@ func (g *Group) run(ctx context.Context) {
g.RestoreForState(restoreStartTime) g.RestoreForState(restoreStartTime)
totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds() totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds()
g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds) g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds)
level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) g.logger.Debug("'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds)
g.shouldRestore = false g.shouldRestore = false
} }
@ -495,7 +499,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
defer cleanup() defer cleanup()
} }
logger := log.WithPrefix(g.logger, "name", rule.Name(), "index", i) logger := g.logger.With("name", rule.Name(), "index", i)
ctx, sp := otel.Tracer("").Start(ctx, "rule") ctx, sp := otel.Tracer("").Start(ctx, "rule")
sp.SetAttributes(attribute.String("name", rule.Name())) sp.SetAttributes(attribute.String("name", rule.Name()))
defer func(t time.Time) { defer func(t time.Time) {
@ -508,7 +512,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
}(time.Now()) }(time.Now())
if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() {
logger = log.WithPrefix(logger, "trace_id", sp.SpanContext().TraceID()) logger = logger.With("trace_id", sp.SpanContext().TraceID())
} }
g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
@ -524,7 +528,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
// happens on shutdown and thus we skip logging of any errors here. // happens on shutdown and thus we skip logging of any errors here.
var eqc promql.ErrQueryCanceled var eqc promql.ErrQueryCanceled
if !errors.As(err, &eqc) { if !errors.As(err, &eqc) {
level.Warn(logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) logger.Warn("Evaluating rule failed", "rule", rule, "err", err)
} }
return return
} }
@ -550,7 +554,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
sp.SetStatus(codes.Error, err.Error()) sp.SetStatus(codes.Error, err.Error())
g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc()
level.Warn(logger).Log("msg", "Rule sample appending failed", "err", err) logger.Warn("Rule sample appending failed", "err", err)
return return
} }
g.seriesInPreviousEval[i] = seriesReturned g.seriesInPreviousEval[i] = seriesReturned
@ -574,15 +578,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
switch { switch {
case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample):
numOutOfOrder++ numOutOfOrder++
level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrTooOldSample): case errors.Is(unwrappedErr, storage.ErrTooOldSample):
numTooOld++ numTooOld++
level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp):
numDuplicates++ numDuplicates++
level.Debug(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) logger.Debug("Rule evaluation result discarded", "err", err, "sample", s)
default: default:
level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) logger.Warn("Rule evaluation result discarded", "err", err, "sample", s)
} }
} else { } else {
buf := [1024]byte{} buf := [1024]byte{}
@ -590,13 +594,13 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
} }
} }
if numOutOfOrder > 0 { if numOutOfOrder > 0 {
level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder)
} }
if numTooOld > 0 { if numTooOld > 0 {
level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld)
} }
if numDuplicates > 0 { if numDuplicates > 0 {
level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates)
} }
for metric, lset := range g.seriesInPreviousEval[i] { for metric, lset := range g.seriesInPreviousEval[i] {
@ -615,7 +619,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) {
// Do not count these in logging, as this is expected if series // Do not count these in logging, as this is expected if series
// is exposed from a different rule. // is exposed from a different rule.
default: default:
level.Warn(logger).Log("msg", "Adding stale sample failed", "sample", lset.String(), "err", err) logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err)
} }
} }
} }
@ -672,11 +676,11 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) {
// Do not count these in logging, as this is expected if series // Do not count these in logging, as this is expected if series
// is exposed from a different rule. // is exposed from a different rule.
default: default:
level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err) g.logger.Warn("Adding stale sample for previous configuration failed", "sample", s, "err", err)
} }
} }
if err := app.Commit(); err != nil { if err := app.Commit(); err != nil {
level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err) g.logger.Warn("Stale sample appending for previous configuration failed", "err", err)
} else { } else {
g.staleSeries = nil g.staleSeries = nil
} }
@ -691,12 +695,12 @@ func (g *Group) RestoreForState(ts time.Time) {
mintMS := int64(model.TimeFromUnixNano(mint.UnixNano())) mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
q, err := g.opts.Queryable.Querier(mintMS, maxtMS) q, err := g.opts.Queryable.Querier(mintMS, maxtMS)
if err != nil { if err != nil {
level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err) g.logger.Error("Failed to get Querier", "err", err)
return return
} }
defer func() { defer func() {
if err := q.Close(); err != nil { if err := q.Close(); err != nil {
level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err) g.logger.Error("Failed to close Querier", "err", err)
} }
}() }()
@ -717,8 +721,8 @@ func (g *Group) RestoreForState(ts time.Time) {
sset, err := alertRule.QueryForStateSeries(g.opts.Context, q) sset, err := alertRule.QueryForStateSeries(g.opts.Context, q)
if err != nil { if err != nil {
level.Error(g.logger).Log( g.logger.Error(
"msg", "Failed to restore 'for' state", "Failed to restore 'for' state",
labels.AlertName, alertRule.Name(), labels.AlertName, alertRule.Name(),
"stage", "Select", "stage", "Select",
"err", err, "err", err,
@ -737,7 +741,7 @@ func (g *Group) RestoreForState(ts time.Time) {
// No results for this alert rule. // No results for this alert rule.
if len(seriesByLabels) == 0 { if len(seriesByLabels) == 0 {
level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) g.logger.Debug("No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name())
alertRule.SetRestored(true) alertRule.SetRestored(true)
continue continue
} }
@ -757,7 +761,7 @@ func (g *Group) RestoreForState(ts time.Time) {
t, v = it.At() t, v = it.At()
} }
if it.Err() != nil { if it.Err() != nil {
level.Error(g.logger).Log("msg", "Failed to restore 'for' state", g.logger.Error("Failed to restore 'for' state",
labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err()) labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
return return
} }
@ -799,7 +803,7 @@ func (g *Group) RestoreForState(ts time.Time) {
} }
a.ActiveAt = restoredActiveAt a.ActiveAt = restoredActiveAt
level.Debug(g.logger).Log("msg", "'for' state restored", g.logger.Debug("'for' state restored",
labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
"labels", a.Labels.String()) "labels", a.Labels.String())
}) })
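
Note: taken together, the group.go hunks show the call-site pattern used throughout this migration: context keyvals that used to be attached with log.With/log.WithPrefix now go through (*slog.Logger).With, and leveled calls such as level.Warn(logger).Log("msg", ...) become logger.Warn(msg, keyvals...). A self-contained sketch of the two styles; the handler choice and the key/value data are illustrative, not taken from Prometheus:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        // Any slog handler works for the sketch; Prometheus wires up its
        // *slog.Logger elsewhere via the prometheus/common promslog package.
        logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

        // go-kit:  logger = log.With(logger, "file", file, "group", name)
        logger = logger.With("file", "rules.yaml", "group", "example")

        // go-kit:  level.Warn(logger).Log("msg", "Evaluating rule failed", "err", err)
        // slog:    the message is positional, remaining arguments are key/value pairs.
        logger.Warn("Evaluating rule failed", "rule", "up == 0", "err", "query canceled")
    }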

View file

@ -17,15 +17,15 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net/url" "net/url"
"slices" "slices"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/promslog"
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
"github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/labels"
@ -96,7 +96,7 @@ type Manager struct {
done chan struct{} done chan struct{}
restored bool restored bool
logger log.Logger logger *slog.Logger
} }
// NotifyFunc sends notifications about a set of alerts generated by the given expression. // NotifyFunc sends notifications about a set of alerts generated by the given expression.
@ -110,7 +110,7 @@ type ManagerOptions struct {
Context context.Context Context context.Context
Appendable storage.Appendable Appendable storage.Appendable
Queryable storage.Queryable Queryable storage.Queryable
Logger log.Logger Logger *slog.Logger
Registerer prometheus.Registerer Registerer prometheus.Registerer
OutageTolerance time.Duration OutageTolerance time.Duration
ForGracePeriod time.Duration ForGracePeriod time.Duration
@ -148,6 +148,10 @@ func NewManager(o *ManagerOptions) *Manager {
o.RuleDependencyController = ruleDependencyController{} o.RuleDependencyController = ruleDependencyController{}
} }
if o.Logger == nil {
o.Logger = promslog.NewNopLogger()
}
m := &Manager{ m := &Manager{
groups: map[string]*Group{}, groups: map[string]*Group{},
opts: o, opts: o,
@ -161,7 +165,7 @@ func NewManager(o *ManagerOptions) *Manager {
// Run starts processing of the rule manager. It is blocking. // Run starts processing of the rule manager. It is blocking.
func (m *Manager) Run() { func (m *Manager) Run() {
level.Info(m.logger).Log("msg", "Starting rule manager...") m.logger.Info("Starting rule manager...")
m.start() m.start()
<-m.done <-m.done
} }
@ -175,7 +179,7 @@ func (m *Manager) Stop() {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
level.Info(m.logger).Log("msg", "Stopping rule manager...") m.logger.Info("Stopping rule manager...")
for _, eg := range m.groups { for _, eg := range m.groups {
eg.stop() eg.stop()
@ -185,7 +189,7 @@ func (m *Manager) Stop() {
// staleness markers. // staleness markers.
close(m.done) close(m.done)
level.Info(m.logger).Log("msg", "Rule manager stopped") m.logger.Info("Rule manager stopped")
} }
// Update the rule manager's state as the config requires. If // Update the rule manager's state as the config requires. If
@ -206,7 +210,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels
if errs != nil { if errs != nil {
for _, e := range errs { for _, e := range errs {
level.Error(m.logger).Log("msg", "loading groups failed", "err", e) m.logger.Error("loading groups failed", "err", e)
} }
return errors.New("error loading rules, previous rule set restored") return errors.New("error loading rules, previous rule set restored")
} }
@ -323,7 +327,7 @@ func (m *Manager) LoadGroups(
externalLabels, externalLabels,
externalURL, externalURL,
m.restored, m.restored,
log.With(m.logger, "alert", r.Alert), m.logger.With("alert", r.Alert),
)) ))
continue continue
} }
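
Note: manager.go adds the same defensive default that NewGroup gets: calling With() on a nil *slog.Logger panics, so NewManager substitutes promslog.NewNopLogger() when the caller leaves ManagerOptions.Logger unset. A minimal sketch of that pattern under a stand-in options struct, with illustrative keyvals:

    package main

    import (
        "log/slog"

        "github.com/prometheus/common/promslog"
    )

    type options struct {
        Logger *slog.Logger
    }

    // defaultLogger mirrors the nil check added in NewManager and NewGroup.
    func defaultLogger(o *options) *slog.Logger {
        if o.Logger == nil {
            o.Logger = promslog.NewNopLogger()
        }
        // Safe now even if the caller passed no logger.
        return o.Logger.With("alert", "HighErrorRate")
    }

    func main() {
        l := defaultLogger(&options{})
        l.Info("Starting rule manager...")
    }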

View file

@ -26,10 +26,10 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/atomic" "go.uber.org/atomic"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
@ -374,7 +374,7 @@ func TestForStateRestore(t *testing.T) {
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {},
OutageTolerance: 30 * time.Minute, OutageTolerance: 30 * time.Minute,
ForGracePeriod: 10 * time.Minute, ForGracePeriod: 10 * time.Minute,
@ -547,7 +547,7 @@ func TestStaleness(t *testing.T) {
Appendable: st, Appendable: st,
Queryable: st, Queryable: st,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("a + 1") expr, err := parser.ParseExpr("a + 1")
@ -641,7 +641,7 @@ groups:
require.NoError(t, err) require.NoError(t, err)
m := NewManager(&ManagerOptions{ m := NewManager(&ManagerOptions{
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
DefaultRuleQueryOffset: func() time.Duration { DefaultRuleQueryOffset: func() time.Duration {
return time.Minute return time.Minute
}, },
@ -781,7 +781,7 @@ func TestUpdate(t *testing.T) {
Queryable: st, Queryable: st,
QueryFunc: EngineQueryFunc(engine, st), QueryFunc: EngineQueryFunc(engine, st),
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
}) })
ruleManager.start() ruleManager.start()
defer ruleManager.Stop() defer ruleManager.Stop()
@ -923,14 +923,14 @@ func TestNotify(t *testing.T) {
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
NotifyFunc: notifyFunc, NotifyFunc: notifyFunc,
ResendDelay: 2 * time.Second, ResendDelay: 2 * time.Second,
} }
expr, err := parser.ParseExpr("a > 1") expr, err := parser.ParseExpr("a > 1")
require.NoError(t, err) require.NoError(t, err)
rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) rule := NewAlertingRule("aTooHigh", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
group := NewGroup(GroupOptions{ group := NewGroup(GroupOptions{
Name: "alert", Name: "alert",
Interval: time.Second, Interval: time.Second,
@ -994,7 +994,7 @@ func TestMetricsUpdate(t *testing.T) {
Queryable: storage, Queryable: storage,
QueryFunc: EngineQueryFunc(engine, storage), QueryFunc: EngineQueryFunc(engine, storage),
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
Registerer: registry, Registerer: registry,
}) })
ruleManager.start() ruleManager.start()
@ -1068,7 +1068,7 @@ func TestGroupStalenessOnRemoval(t *testing.T) {
Queryable: storage, Queryable: storage,
QueryFunc: EngineQueryFunc(engine, storage), QueryFunc: EngineQueryFunc(engine, storage),
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
}) })
var stopped bool var stopped bool
ruleManager.start() ruleManager.start()
@ -1145,7 +1145,7 @@ func TestMetricsStalenessOnManagerShutdown(t *testing.T) {
Queryable: storage, Queryable: storage,
QueryFunc: EngineQueryFunc(engine, storage), QueryFunc: EngineQueryFunc(engine, storage),
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
}) })
var stopped bool var stopped bool
ruleManager.start() ruleManager.start()
@ -1247,7 +1247,7 @@ func TestRuleHealthUpdates(t *testing.T) {
Appendable: st, Appendable: st,
Queryable: st, Queryable: st,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("a + 1") expr, err := parser.ParseExpr("a + 1")
@ -1345,7 +1345,7 @@ func TestRuleGroupEvalIterationFunc(t *testing.T) {
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {}, NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {},
OutageTolerance: 30 * time.Minute, OutageTolerance: 30 * time.Minute,
ForGracePeriod: 10 * time.Minute, ForGracePeriod: 10 * time.Minute,
@ -1431,7 +1431,7 @@ func TestNativeHistogramsInRecordingRules(t *testing.T) {
Appendable: storage, Appendable: storage,
Queryable: storage, Queryable: storage,
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("sum(histogram_metric)") expr, err := parser.ParseExpr("sum(histogram_metric)")
@ -1479,7 +1479,7 @@ func TestManager_LoadGroups_ShouldCheckWhetherEachRuleHasDependentsAndDependenci
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
Appendable: storage, Appendable: storage,
QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil }, QueryFunc: func(ctx context.Context, q string, ts time.Time) (promql.Vector, error) { return nil, nil },
}) })
@ -1535,7 +1535,7 @@ func TestDependencyMap(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opts := &ManagerOptions{ opts := &ManagerOptions{
Context: ctx, Context: ctx,
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
@ -1544,7 +1544,7 @@ func TestDependencyMap(t *testing.T) {
expr, err = parser.ParseExpr("user:requests:rate1m <= 0") expr, err = parser.ParseExpr("user:requests:rate1m <= 0")
require.NoError(t, err) require.NoError(t, err)
rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
expr, err = parser.ParseExpr("sum by (user) (rate(requests[5m]))") expr, err = parser.ParseExpr("sum by (user) (rate(requests[5m]))")
require.NoError(t, err) require.NoError(t, err)
@ -1584,7 +1584,7 @@ func TestNoDependency(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opts := &ManagerOptions{ opts := &ManagerOptions{
Context: ctx, Context: ctx,
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
@ -1607,7 +1607,7 @@ func TestDependenciesEdgeCases(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opts := &ManagerOptions{ opts := &ManagerOptions{
Context: ctx, Context: ctx,
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
t.Run("empty group", func(t *testing.T) { t.Run("empty group", func(t *testing.T) {
@ -1765,7 +1765,7 @@ func TestNoMetricSelector(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opts := &ManagerOptions{ opts := &ManagerOptions{
Context: ctx, Context: ctx,
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
@ -1794,7 +1794,7 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opts := &ManagerOptions{ opts := &ManagerOptions{
Context: ctx, Context: ctx,
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))") expr, err := parser.ParseExpr("sum by (user) (rate(requests[1m]))")
@ -1803,7 +1803,7 @@ func TestDependentRulesWithNonMetricExpression(t *testing.T) {
expr, err = parser.ParseExpr("user:requests:rate1m <= 0") expr, err = parser.ParseExpr("user:requests:rate1m <= 0")
require.NoError(t, err) require.NoError(t, err)
rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, log.NewNopLogger()) rule2 := NewAlertingRule("ZeroRequests", expr, 0, 0, labels.Labels{}, labels.Labels{}, labels.EmptyLabels(), "", true, promslog.NewNopLogger())
expr, err = parser.ParseExpr("3") expr, err = parser.ParseExpr("3")
require.NoError(t, err) require.NoError(t, err)
@ -1826,7 +1826,7 @@ func TestRulesDependentOnMetaMetrics(t *testing.T) {
ctx := context.Background() ctx := context.Background()
opts := &ManagerOptions{ opts := &ManagerOptions{
Context: ctx, Context: ctx,
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
} }
// This rule is not dependent on any other rules in its group but it does depend on `ALERTS`, which is produced by // This rule is not dependent on any other rules in its group but it does depend on `ALERTS`, which is produced by
@ -1855,7 +1855,7 @@ func TestDependencyMapUpdatesOnGroupUpdate(t *testing.T) {
files := []string{"fixtures/rules.yaml"} files := []string{"fixtures/rules.yaml"}
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
}) })
ruleManager.start() ruleManager.start()
@ -2107,7 +2107,7 @@ func TestUpdateWhenStopped(t *testing.T) {
files := []string{"fixtures/rules.yaml"} files := []string{"fixtures/rules.yaml"}
ruleManager := NewManager(&ManagerOptions{ ruleManager := NewManager(&ManagerOptions{
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
}) })
ruleManager.start() ruleManager.start()
err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil) err := ruleManager.Update(10*time.Second, files, labels.EmptyLabels(), "", nil)
@ -2129,7 +2129,7 @@ func optsFactory(storage storage.Storage, maxInflight, inflightQueries *atomic.I
return &ManagerOptions{ return &ManagerOptions{
Context: context.Background(), Context: context.Background(),
Logger: log.NewNopLogger(), Logger: promslog.NewNopLogger(),
ConcurrentEvalsEnabled: concurrent, ConcurrentEvalsEnabled: concurrent,
MaxConcurrentEvals: maxConcurrent, MaxConcurrentEvals: maxConcurrent,
Appendable: storage, Appendable: storage,

Some files were not shown because too many files have changed in this diff.